Commit cc15734

---
r: 76129
b: refs/heads/master
c: c7a1e46
h: refs/heads/master
i:
  76127: 5c3c272
v: v3
Steven Rostedt authored and Ingo Molnar committed Jan 25, 2008
1 parent 4ccbd29 commit cc15734
Showing 2 changed files with 5 additions and 92 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4642dafdf93dc7d66ee33437b93a5e6b8cea20d2
+refs/heads/master: c7a1e46aa9782a947cf2ed506245d43396dbf991
95 changes: 4 additions & 91 deletions trunk/kernel/sched_rt.c
@@ -567,109 +567,22 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *load_balance_start_rt(void *arg)
-{
-	struct rq *rq = arg;
-	struct rt_prio_array *array = &rq->rt.active;
-	struct list_head *head, *curr;
-	struct task_struct *p;
-	int idx;
-
-	idx = sched_find_first_bit(array->bitmap);
-	if (idx >= MAX_RT_PRIO)
-		return NULL;
-
-	head = array->queue + idx;
-	curr = head->prev;
-
-	p = list_entry(curr, struct task_struct, run_list);
-
-	curr = curr->prev;
-
-	rq->rt.rt_load_balance_idx = idx;
-	rq->rt.rt_load_balance_head = head;
-	rq->rt.rt_load_balance_curr = curr;
-
-	return p;
-}
-
-static struct task_struct *load_balance_next_rt(void *arg)
-{
-	struct rq *rq = arg;
-	struct rt_prio_array *array = &rq->rt.active;
-	struct list_head *head, *curr;
-	struct task_struct *p;
-	int idx;
-
-	idx = rq->rt.rt_load_balance_idx;
-	head = rq->rt.rt_load_balance_head;
-	curr = rq->rt.rt_load_balance_curr;
-
-	/*
-	 * If we arrived back to the head again then
-	 * iterate to the next queue (if any):
-	 */
-	if (unlikely(head == curr)) {
-		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
-
-		if (next_idx >= MAX_RT_PRIO)
-			return NULL;
-
-		idx = next_idx;
-		head = array->queue + idx;
-		curr = head->prev;
-
-		rq->rt.rt_load_balance_idx = idx;
-		rq->rt.rt_load_balance_head = head;
-	}
-
-	p = list_entry(curr, struct task_struct, run_list);
-
-	curr = curr->prev;
-
-	rq->rt.rt_load_balance_curr = curr;
-
-	return p;
-}
-
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long max_load_move,
 		struct sched_domain *sd, enum cpu_idle_type idle,
 		int *all_pinned, int *this_best_prio)
 {
-	struct rq_iterator rt_rq_iterator;
-
-	rt_rq_iterator.start = load_balance_start_rt;
-	rt_rq_iterator.next = load_balance_next_rt;
-	/* pass 'busiest' rq argument into
-	 * load_balance_[start|next]_rt iterators
-	 */
-	rt_rq_iterator.arg = busiest;
-
-	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
-			idle, all_pinned, this_best_prio, &rt_rq_iterator);
+	/* don't touch RT tasks */
+	return 0;
 }
 
 static int
 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 struct sched_domain *sd, enum cpu_idle_type idle)
 {
-	struct rq_iterator rt_rq_iterator;
-
-	rt_rq_iterator.start = load_balance_start_rt;
-	rt_rq_iterator.next = load_balance_next_rt;
-	rt_rq_iterator.arg = busiest;
-
-	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
-			&rt_rq_iterator);
+	/* don't touch RT tasks */
+	return 0;
 }
 
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq)	do { } while (0)
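The comment on the removed iterator describes the interesting trick: the runqueue stays locked for the whole walk, but the task just returned may be dequeued, so the iterator always pre-advances its cursor before handing a task back. Below is a minimal userspace sketch of that pattern, not kernel code: struct node, iter_start(), and iter_next() are illustrative stand-ins for struct list_head and the load_balance_*_rt pair. Note the guarantee is one-sided, as in the scheduler: only the element just returned may be removed; removing the pre-fetched node would still break the walk.

#include <stdio.h>

/* Circular doubly linked list with a sentinel head, mimicking the
 * kernel's struct list_head just enough for this demonstration. */
struct node {
	int val;
	struct node *prev, *next;
};

struct iter {
	struct node *head;	/* sentinel marking the end of the walk */
	struct node *curr;	/* pre-fetched cursor, one step ahead */
};

/* Start at head->prev (as load_balance_start_rt did) and pre-advance,
 * so the caller may dequeue the returned node without harm. */
static struct node *iter_start(struct iter *it, struct node *head)
{
	struct node *p;

	it->head = head;
	if (head->prev == head)		/* empty list */
		return NULL;
	p = head->prev;
	it->curr = p->prev;		/* pre-iterate before returning */
	return p;
}

static struct node *iter_next(struct iter *it)
{
	struct node *p;

	if (it->curr == it->head)	/* wrapped back to the head: done */
		return NULL;
	p = it->curr;
	it->curr = p->prev;		/* pre-iterate again */
	return p;
}

static void node_del(struct node *n)	/* unlink n from the list */
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node head = { 0, &head, &head };
	struct node n[3] = { { 1 }, { 2 }, { 3 } };
	struct iter it;
	struct node *p;

	for (int i = 0; i < 3; i++) {	/* append n[0..2] before head */
		n[i].next = &head;
		n[i].prev = head.prev;
		head.prev->next = &n[i];
		head.prev = &n[i];
	}

	/* Dequeue every node as it is visited; the walk stays intact. */
	for (p = iter_start(&it, &head); p; p = iter_next(&it)) {
		printf("visiting %d\n", p->val);
		node_del(p);
	}
	return 0;
}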
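For context on the two stubs the commit leaves behind: before this change, load_balance_rt() and move_one_task_rt() packaged the class-specific walk into an rq_iterator (the .start/.next/.arg assignments in the deleted lines) and handed it to the generic balance_tasks()/iter_move_one_task() machinery. The sketch below shows that callback-iterator shape in isolation; task_iterator, walk_tasks(), and toy_rq are invented names for illustration, not kernel types.

#include <stdio.h>

struct task {
	int pid;
};

/* Callback iterator in the shape of the kernel's rq_iterator:
 * start/next function pointers plus an opaque argument. */
struct task_iterator {
	struct task *(*start)(void *arg);
	struct task *(*next)(void *arg);
	void *arg;		/* e.g. the busiest runqueue */
};

/* Generic consumer: it sees only the callbacks, never the
 * class-specific queue layout behind them. */
static void walk_tasks(struct task_iterator *it)
{
	struct task *t;

	for (t = it->start(it->arg); t; t = it->next(it->arg))
		printf("considering task %d\n", t->pid);
}

/* A toy "runqueue": a fixed array of tasks plus a cursor. */
struct toy_rq {
	struct task tasks[3];
	int pos;
};

static struct task *toy_start(void *arg)
{
	struct toy_rq *rq = arg;

	rq->pos = 0;
	return rq->pos < 3 ? &rq->tasks[rq->pos++] : NULL;
}

static struct task *toy_next(void *arg)
{
	struct toy_rq *rq = arg;

	return rq->pos < 3 ? &rq->tasks[rq->pos++] : NULL;
}

int main(void)
{
	struct toy_rq rq = { { { 101 }, { 102 }, { 103 } }, 0 };
	struct task_iterator it = { toy_start, toy_next, &rq };

	walk_tasks(&it);
	return 0;
}

After this commit both RT hooks simply return 0 ("don't touch RT tasks"), leaving RT migration entirely to the active push/pull paths such as push_rt_tasks() visible in the hunk's context lines.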
