
Commit 2d58c02

---
r: 100203
b: refs/heads/master
c: 20b6331
h: refs/heads/master
i:
  100201: 4d51e4c
  100199: fe7d2be
v: v3
Dmitry Adamushko authored and Ingo Molnar committed Jun 18, 2008
1 parent 7b3c2e5 commit 2d58c02
Showing 3 changed files with 12 additions and 40 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f7d62364b2cef85cbcd4feffdd3632ef7c3b61c2
+refs/heads/master: 20b6331bfed1f07ba1e5006889a5d64adc53615e
6 changes: 2 additions & 4 deletions trunk/kernel/sched.c
@@ -153,8 +153,7 @@ static inline int task_has_rt_policy(struct task_struct *p)
  */
 struct rt_prio_array {
 	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
-	struct list_head xqueue[MAX_RT_PRIO]; /* exclusive queue */
-	struct list_head squeue[MAX_RT_PRIO]; /* shared queue */
+	struct list_head queue[MAX_RT_PRIO];
 };
 
 struct rt_bandwidth {
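
This hunk collapses the exclusive/shared queue pair back into a single queue[] per priority; the bitmap invariant is unchanged: one set bit per non-empty list, plus a permanently set delimiter bit so the bit search always terminates. A minimal userspace sketch of that invariant — the byte-per-bit bitmap, the small helper, and the linear scan are illustrative stand-ins, not the kernel's DECLARE_BITMAP() or sched_find_first_bit():

#include <stdio.h>
#include <string.h>

#define MAX_RT_PRIO 100                          /* kernel value; any size would do */

struct rt_prio_array_sketch {
	unsigned char bitmap[MAX_RT_PRIO + 1];   /* one byte per bit; +1 for delimiter */
	void *queue[MAX_RT_PRIO];                /* stand-in for struct list_head[] */
};

/* naive stand-in for sched_find_first_bit(): lowest index == highest priority */
static int first_set(const struct rt_prio_array_sketch *a)
{
	int i = 0;
	while (!a->bitmap[i])
		i++;                             /* the delimiter bit guarantees a stop */
	return i;
}

int main(void)
{
	struct rt_prio_array_sketch a;
	int dummy1, dummy2;

	memset(&a, 0, sizeof(a));
	a.bitmap[MAX_RT_PRIO] = 1;               /* the "delimiter for bitsearch" */

	a.queue[10] = &dummy1; a.bitmap[10] = 1; /* enqueue an entity at prio 10 */
	a.queue[3]  = &dummy2; a.bitmap[3]  = 1; /* enqueue an entity at prio 3 */

	printf("highest runnable prio: %d\n", first_set(&a));   /* prints 3 */
	return 0;
}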
@@ -7620,8 +7619,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 
 	array = &rt_rq->active;
 	for (i = 0; i < MAX_RT_PRIO; i++) {
-		INIT_LIST_HEAD(array->xqueue + i);
-		INIT_LIST_HEAD(array->squeue + i);
+		INIT_LIST_HEAD(array->queue + i);
 		__clear_bit(i, array->bitmap);
 	}
 	/* delimiter for bitsearch: */
44 changes: 9 additions & 35 deletions trunk/kernel/sched_rt.c
@@ -576,16 +576,15 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
 	if (group_rq && rt_rq_throttled(group_rq))
 		return;
 
 	if (rt_se->nr_cpus_allowed == 1)
-		list_add_tail(&rt_se->run_list,
-			      array->xqueue + rt_se_prio(rt_se));
+		list_add(&rt_se->run_list, queue);
 	else
-		list_add_tail(&rt_se->run_list,
-			      array->squeue + rt_se_prio(rt_se));
+		list_add_tail(&rt_se->run_list, queue);
 
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
@@ -598,8 +597,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_prio_array *array = &rt_rq->active;
 
 	list_del_init(&rt_se->run_list);
-	if (list_empty(array->squeue + rt_se_prio(rt_se))
-	    && list_empty(array->xqueue + rt_se_prio(rt_se)))
+	if (list_empty(array->queue + rt_se_prio(rt_se)))
 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
 
 	dec_rt_tasks(rt_se, rt_rq);
@@ -666,19 +664,14 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 /*
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
- *
- * Note: We always enqueue the task to the shared-queue, regardless of its
- * previous position w.r.t. exclusive vs shared. This is so that exclusive RR
- * tasks fairly round-robin with all tasks on the runqueue, not just other
- * exclusive tasks.
  */
 static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
 
 	list_del_init(&rt_se->run_list);
-	list_add_tail(&rt_se->run_list, array->squeue + rt_se_prio(rt_se));
+	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
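
After these hunks, the enqueue and requeue paths differ only in where they insert into the single per-priority list: list_add() puts a CPU-pinned entity at the head, while list_add_tail() — also used by requeue_rt_entity() for round-robin rotation — queues behind existing peers. A rough sketch of that head-vs-tail semantics on a kernel-style circular doubly linked list (plain userspace C, not <linux/list.h>; the entity type and helper names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct list_node { struct list_node *next, *prev; };

static void list_init(struct list_node *h) { h->next = h->prev = h; }

static void insert_between(struct list_node *n, struct list_node *prev,
			   struct list_node *next)
{
	next->prev = n; n->next = next;
	n->prev = prev; prev->next = n;
}

/* like list_add(): entity runs first among its priority peers */
static void add_head(struct list_node *n, struct list_node *h)
{
	insert_between(n, h, h->next);
}

/* like list_add_tail(): FIFO order behind existing peers */
static void add_tail(struct list_node *n, struct list_node *h)
{
	insert_between(n, h->prev, h);
}

/* like list_del(): unlink an entity from its list */
static void del_entry(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct entity { int id; struct list_node run_list; };

static void dump(const char *tag, struct list_node *q)
{
	printf("%s:", tag);
	for (struct list_node *p = q->next; p != q; p = p->next) {
		struct entity *e = (struct entity *)
			((char *)p - offsetof(struct entity, run_list));
		printf(" %d", e->id);
	}
	printf("\n");
}

int main(void)
{
	struct list_node queue;
	struct entity a = {1}, b = {2}, c = {3};

	list_init(&queue);
	add_tail(&a.run_list, &queue);  /* migratable */
	add_tail(&b.run_list, &queue);  /* migratable */
	add_head(&c.run_list, &queue);  /* pinned to one CPU: goes first */
	dump("after enqueue", &queue);  /* 3 1 2 */

	/* requeue_rt_entity() effect: the running entity rotates to the tail */
	del_entry(&c.run_list);
	add_tail(&c.run_list, &queue);
	dump("after requeue", &queue);  /* 1 2 3 */

	return 0;
}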
@@ -736,9 +729,6 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 }
 #endif /* CONFIG_SMP */
 
-static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
-						   struct rt_rq *rt_rq);
-
 /*
  * Preempt the current task with a newly woken task if needed:
  */
@@ -764,8 +754,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if((p->prio == rq->curr->prio)
 	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1
-	   && pick_next_rt_entity(rq, &rq->rt) != &rq->curr->rt) {
+	   && rq->curr->rt.nr_cpus_allowed != 1) {
 		cpumask_t mask;
 
 		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
@@ -789,15 +778,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	idx = sched_find_first_bit(array->bitmap);
 	BUG_ON(idx >= MAX_RT_PRIO);
 
-	queue = array->xqueue + idx;
-	if (!list_empty(queue))
-		next = list_entry(queue->next, struct sched_rt_entity,
-				  run_list);
-	else {
-		queue = array->squeue + idx;
-		next = list_entry(queue->next, struct sched_rt_entity,
-				  run_list);
-	}
+	queue = array->queue + idx;
+	next = list_entry(queue->next, struct sched_rt_entity, run_list);
 
 	return next;
 }
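
With a single queue per priority, the pick above reduces to two steps: find the first set bit in the bitmap, then take the head of that priority's list. A self-contained sketch of that two-step pick — the sched_ent type, the tiny table, and the linear scan are illustrative stand-ins for the kernel's structures:

#include <stdio.h>

#define NPRIO 8                                   /* tiny stand-in for MAX_RT_PRIO */

struct sched_ent { const char *name; struct sched_ent *next; };

struct prio_array {
	unsigned int bitmap;                      /* bit i set => queue[i] non-empty */
	struct sched_ent *queue[NPRIO];           /* singly linked FIFOs for brevity */
};

static struct sched_ent *pick_next(const struct prio_array *a)
{
	for (int idx = 0; idx < NPRIO; idx++)     /* sched_find_first_bit() stand-in */
		if (a->bitmap & (1u << idx))
			return a->queue[idx];     /* head of the list runs next */
	return NULL;                              /* empty runqueue */
}

int main(void)
{
	struct sched_ent lo = {"batch-prio5", NULL};
	struct sched_ent hi = {"irq-prio1", NULL};
	struct prio_array a = {0};

	a.queue[5] = &lo; a.bitmap |= 1u << 5;
	a.queue[1] = &hi; a.bitmap |= 1u << 1;

	printf("next: %s\n", pick_next(&a)->name);   /* lower index wins: irq-prio1 */
	return 0;
}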
@@ -867,7 +849,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 			continue;
 		if (next && next->prio < idx)
 			continue;
-		list_for_each_entry(rt_se, array->squeue + idx, run_list) {
+		list_for_each_entry(rt_se, array->queue + idx, run_list) {
 			struct task_struct *p = rt_task_of(rt_se);
 			if (pick_rt_task(rq, p, cpu)) {
 				next = p;
@@ -1249,14 +1231,6 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		}
 
 		update_rt_migration(rq);
-
-		if (unlikely(weight == 1 || p->rt.nr_cpus_allowed == 1))
-			/*
-			 * If either the new or old weight is a "1", we need
-			 * to requeue to properly move between shared and
-			 * exclusive queues.
-			 */
-			requeue_task_rt(rq, p);
 	}
 
 	p->cpus_allowed = *new_mask;
