sched: disable standard balancer for RT tasks
Since we now take an active approach to load balancing, we don't need to
balance RT tasks via the normal task balancer. In fact, this code was
found to pull RT tasks away from CPUs that the active movement had just
pushed them to, resulting in large latencies.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
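
For context, the "active approach" the message refers to is the push/pull
machinery this patch series introduces: push_rt_tasks() (still called from
wakeup_balance_rt() in the surviving code below) migrates an overloaded
runqueue's waiting RT task to a CPU running lower-priority work as soon as
the overload occurs, rather than waiting for the periodic balancer. The
following is a minimal userspace sketch of the push idea only; every name
in it (toy_rq, toy_push_rt_task, find_lowest_cpu) is hypothetical, and the
real kernel logic is considerably more involved.

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

/* Toy runqueue: lower prio value = more urgent, as with kernel RT prios. */
struct toy_rq {
        int curr_prio;     /* priority of the task currently running */
        int pushable_prio; /* best queued RT task not running; INT_MAX if none */
};

static struct toy_rq rqs[NR_CPUS];

/* Find the CPU whose running task is least urgent (highest prio value). */
static int find_lowest_cpu(void)
{
        int cpu, lowest = 0;

        for (cpu = 1; cpu < NR_CPUS; cpu++)
                if (rqs[cpu].curr_prio > rqs[lowest].curr_prio)
                        lowest = cpu;
        return lowest;
}

/*
 * Active push: if some CPU runs less urgent work than our best waiting
 * RT task, migrate that task there right away instead of waiting for
 * the periodic balancer to notice.
 */
static void toy_push_rt_task(int this_cpu)
{
        struct toy_rq *rq = &rqs[this_cpu];
        int target;

        if (rq->pushable_prio == INT_MAX)
                return;         /* nothing waiting */

        target = find_lowest_cpu();
        if (target != this_cpu && rqs[target].curr_prio > rq->pushable_prio) {
                printf("push prio-%d task: cpu%d -> cpu%d\n",
                       rq->pushable_prio, this_cpu, target);
                rqs[target].curr_prio = rq->pushable_prio;
                rq->pushable_prio = INT_MAX;
        }
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                rqs[cpu].curr_prio = 50 + 10 * cpu;     /* cpu3 least urgent */
                rqs[cpu].pushable_prio = INT_MAX;
        }
        rqs[0].pushable_prio = 10;      /* urgent task stuck behind prio-50 */

        toy_push_rt_task(0);    /* prints: push prio-10 task: cpu0 -> cpu3 */
        return 0;
}

Because the push happens at the moment of overload and targets the
least-urgent CPU directly, a later sweep by the generic balancer can only
undo its placement, which is exactly the latency problem this commit fixes.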
Steven Rostedt authored and Ingo Molnar committed Jan 25, 2008
1 parent 4642daf commit c7a1e46
Showing 1 changed file with 4 additions and 91 deletions.
kernel/sched_rt.c: 95 changes (4 additions, 91 deletions)
@@ -567,109 +567,22 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *load_balance_start_rt(void *arg)
-{
-	struct rq *rq = arg;
-	struct rt_prio_array *array = &rq->rt.active;
-	struct list_head *head, *curr;
-	struct task_struct *p;
-	int idx;
-
-	idx = sched_find_first_bit(array->bitmap);
-	if (idx >= MAX_RT_PRIO)
-		return NULL;
-
-	head = array->queue + idx;
-	curr = head->prev;
-
-	p = list_entry(curr, struct task_struct, run_list);
-
-	curr = curr->prev;
-
-	rq->rt.rt_load_balance_idx = idx;
-	rq->rt.rt_load_balance_head = head;
-	rq->rt.rt_load_balance_curr = curr;
-
-	return p;
-}
-
-static struct task_struct *load_balance_next_rt(void *arg)
-{
-	struct rq *rq = arg;
-	struct rt_prio_array *array = &rq->rt.active;
-	struct list_head *head, *curr;
-	struct task_struct *p;
-	int idx;
-
-	idx = rq->rt.rt_load_balance_idx;
-	head = rq->rt.rt_load_balance_head;
-	curr = rq->rt.rt_load_balance_curr;
-
-	/*
-	 * If we arrived back to the head again then
-	 * iterate to the next queue (if any):
-	 */
-	if (unlikely(head == curr)) {
-		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-
-		if (next_idx >= MAX_RT_PRIO)
-			return NULL;
-
-		idx = next_idx;
-		head = array->queue + idx;
-		curr = head->prev;
-
-		rq->rt.rt_load_balance_idx = idx;
-		rq->rt.rt_load_balance_head = head;
-	}
-
-	p = list_entry(curr, struct task_struct, run_list);
-
-	curr = curr->prev;
-
-	rq->rt.rt_load_balance_curr = curr;
-
-	return p;
-}
-
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long max_load_move,
 		struct sched_domain *sd, enum cpu_idle_type idle,
 		int *all_pinned, int *this_best_prio)
 {
-	struct rq_iterator rt_rq_iterator;
-
-	rt_rq_iterator.start = load_balance_start_rt;
-	rt_rq_iterator.next = load_balance_next_rt;
-	/* pass 'busiest' rq argument into
-	 * load_balance_[start|next]_rt iterators
-	 */
-	rt_rq_iterator.arg = busiest;
-
-	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
-			     idle, all_pinned, this_best_prio, &rt_rq_iterator);
+	/* don't touch RT tasks */
+	return 0;
 }
 
 static int
 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 struct sched_domain *sd, enum cpu_idle_type idle)
 {
-	struct rq_iterator rt_rq_iterator;
-
-	rt_rq_iterator.start = load_balance_start_rt;
-	rt_rq_iterator.next = load_balance_next_rt;
-	rt_rq_iterator.arg = busiest;
-
-	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
-				  &rt_rq_iterator);
+	/* don't touch RT tasks */
+	return 0;
 }
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq) do { } while (0)
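
The comment at the top of the removed load_balance_start_rt()/
load_balance_next_rt() iterators describes a dequeue-safe iteration
trick: the cursor is always advanced one step before the current task
is returned, so the caller may dequeue the returned task without
breaking the walk. Here is a standalone sketch of that pattern under
toy types (node, iter, iter_start, iter_next, dequeue are all
hypothetical; this is not the kernel list API, and it walks a single
list rather than a per-priority array):

#include <stdio.h>

struct node {
        int val;
        struct node *prev, *next;
};

struct iter {
        struct node *head; /* sentinel of a circular doubly-linked list */
        struct node *curr; /* already advanced past the next result */
};

/* Return the last element and pre-iterate, mirroring head->prev above. */
static struct node *iter_start(struct iter *it, struct node *head)
{
        struct node *p = head->prev;

        if (p == head)
                return NULL;    /* empty list */
        it->head = head;
        it->curr = p->prev;     /* advance BEFORE handing out p */
        return p;
}

static struct node *iter_next(struct iter *it)
{
        struct node *p = it->curr;

        if (p == it->head)
                return NULL;    /* wrapped around to the sentinel: done */
        it->curr = p->prev;     /* again advance before returning */
        return p;
}

/* Unlink a node; legal on the just-returned element thanks to pre-iteration. */
static void dequeue(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

int main(void)
{
        struct node head = { 0, &head, &head };
        struct node n[3];
        struct iter it;
        struct node *p;
        int i;

        for (i = 0; i < 3; i++) {       /* build head <-> n0 <-> n1 <-> n2 */
                n[i].val = i;
                n[i].prev = head.prev;
                n[i].next = &head;
                head.prev->next = &n[i];
                head.prev = &n[i];
        }

        for (p = iter_start(&it, &head); p; p = iter_next(&it)) {
                dequeue(p);     /* safe: the cursor has moved on */
                printf("visited %d\n", p->val);
        }
        return 0;               /* prints 2, 1, 0 */
}

With this commit, nothing is lost by deleting that machinery: the generic
balancer's load_balance_rt() and move_one_task_rt() hooks now report zero
movable load, so only the active push/pull paths ever migrate RT tasks.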
