sched: rt-group: hierarchy aware throttle
The bandwidth throttle code dequeues a group when it runs out of quota, and
re-queues it once the period rolls over and the quota gets refreshed.

Sadly it failed to take the hierarchy into consideration. Share more of the
enqueue/dequeue code with regular task operations.

Also, some operations like sched_setscheduler() can dequeue/enqueue tasks that
are in throttled runqueues; we should not inadvertently re-enqueue empty
runqueues, so check for that (a simplified sketch of this check follows below).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Daniel K. <dk@uw.no>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
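
The check this patch adds is easiest to see in isolation. Below is a minimal
user-space sketch of the idea; the toy_rt_rq structure and toy_* names are
invented for illustration and only model the throttled/empty test, not the
kernel's real rt_rq accounting.

/*
 * Minimal user-space model of the enqueue check (not kernel code):
 * toy_rt_rq and toy_group_enqueueable() are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_rt_rq {
	bool throttled;		/* group exhausted its quota this period */
	int nr_running;		/* runnable entities queued in the group */
};

/*
 * Mirrors the condition added to __enqueue_rt_entity(): a group runqueue
 * is only queued on its parent when it is neither throttled nor empty.
 * Without the nr_running test, sched_setscheduler() moving the last task
 * out of a throttled group could re-enqueue a now-empty group.
 */
static bool toy_group_enqueueable(const struct toy_rt_rq *group_rq)
{
	return !group_rq->throttled && group_rq->nr_running > 0;
}

int main(void)
{
	struct toy_rt_rq child = { .throttled = true, .nr_running = 0 };

	printf("enqueue child group: %s\n",
	       toy_group_enqueueable(&child) ? "yes" : "no");	/* prints "no" */
	return 0;
}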
Peter Zijlstra authored and Ingo Molnar committed Jun 19, 2008
1 parent 7ea5661 commit ad2a3f1
Showing 1 changed file with 33 additions and 26 deletions.
59 changes: 33 additions & 26 deletions kernel/sched_rt.c
@@ -449,13 +449,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if it's throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * gets throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +470,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -480,19 +486,37 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
 	}
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
@@ -506,32 +530,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
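
For readers following the diff, this is the resulting call structure. It is a
sketch in comment form; the flow is taken from the hunks above, the layout
itself is not part of the commit.

/*
 * Resulting call flow after this patch (sketch, not file contents):
 *
 * enqueue_task_rt(rq, p, wakeup)
 *   -> enqueue_rt_entity(&p->rt)
 *        -> dequeue_rt_stack(rt_se)        build the ->back chain leaf to
 *                                          root, then dequeue top-down
 *        -> __enqueue_rt_entity(rt_se)     re-enqueue bottom-up, skipping
 *                                          throttled or empty group rqs
 *
 * dequeue_task_rt(rq, p, sleep)
 *   -> dequeue_rt_entity(&p->rt)
 *        -> dequeue_rt_stack(rt_se)
 *        -> __enqueue_rt_entity(rt_se)     only for groups that still have
 *                                          rt_nr_running entities
 */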
