Dmitry Adamushko authored and Ingo Molnar committed Jul 18, 2008
1 parent 8555f7f commit 2421523
Showing 2 changed files with 41 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 13b40c1e40f3261e83ee514a08b77dbecb93021b
+refs/heads/master: 7ebefa8ceefed44cc321be70afc54a585a68ac0b
68 changes: 40 additions & 28 deletions trunk/kernel/sched_rt.c
@@ -599,11 +599,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
-	if (rt_se->nr_cpus_allowed == 1)
-		list_add(&rt_se->run_list, queue);
-	else
-		list_add_tail(&rt_se->run_list, queue);
-
+	list_add_tail(&rt_se->run_list, queue);
 	__set_bit(rt_se_prio(rt_se), array->bitmap);
 
 	inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +684,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
  */
-static
-void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
+static void
+requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 {
-	struct rt_prio_array *array = &rt_rq->active;
-
 	if (on_rt_rq(rt_se)) {
-		list_del_init(&rt_se->run_list);
-		list_add_tail(&rt_se->run_list,
-			      array->queue + rt_se_prio(rt_se));
+		struct rt_prio_array *array = &rt_rq->active;
+		struct list_head *queue = array->queue + rt_se_prio(rt_se);
+
+		if (head)
+			list_move(&rt_se->run_list, queue);
+		else
+			list_move_tail(&rt_se->run_list, queue);
 	}
 }
 
-static void requeue_task_rt(struct rq *rq, struct task_struct *p)
+static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 	struct rt_rq *rt_rq;
 
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
-		requeue_rt_entity(rt_rq, rt_se);
+		requeue_rt_entity(rt_rq, rt_se, head);
 	}
 }
 
 static void yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, rq->curr);
+	requeue_task_rt(rq, rq->curr, 0);
 }
 
 #ifdef CONFIG_SMP
@@ -753,6 +751,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 	 */
 	return task_cpu(p);
 }
+
+static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
+{
+	cpumask_t mask;
+
+	if (rq->curr->rt.nr_cpus_allowed == 1)
+		return;
+
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+		return;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+		return;
+
+	/*
+	 * There appears to be other cpus that can accept
+	 * current and none to run 'p', so lets reschedule
+	 * to try and push current away:
+	 */
+	requeue_task_rt(rq, p, 1);
+	resched_task(rq->curr);
+}
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -778,18 +800,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 	 * to move current somewhere else, making room for our non-migratable
 	 * task.
 	 */
-	if((p->prio == rq->curr->prio)
-	   && p->rt.nr_cpus_allowed == 1
-	   && rq->curr->rt.nr_cpus_allowed != 1) {
-		cpumask_t mask;
-
-		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-			/*
-			 * There appears to be other cpus that can accept
-			 * current, so lets reschedule to try and push it away
-			 */
-			resched_task(rq->curr);
-	}
+	if (p->prio == rq->curr->prio && !need_resched())
+		check_preempt_equal_prio(rq, p);
 #endif
 }

@@ -1415,7 +1427,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	 * on the queue:
 	 */
 	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p);
+		requeue_task_rt(rq, p, 0);
 		set_tsk_need_resched(p);
 	}
 }
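Note on the requeue rework: it now goes through list_move() and list_move_tail() (include/linux/list.h), which unlink a node and reinsert it in one step instead of the old list_del_init() plus list_add_tail() pair, and the new head argument picks which end of the per-priority queue the entity lands on. Below is a minimal userspace sketch of that behaviour; the list primitives are re-implemented here purely for illustration, and struct toy_task is a hypothetical stand-in for struct sched_rt_entity, not kernel code:

#include <stddef.h>
#include <stdio.h>

/* Userspace re-creation of the kernel list primitives used above,
 * trimmed down purely for illustration. */
struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void __list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void __list_add(struct list_head *e,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = e;
	e->next = next;
	e->prev = prev;
	prev->next = e;
}

/* list_move(): unlink @e, reinsert right after @head (front of the queue). */
static void list_move(struct list_head *e, struct list_head *head)
{
	__list_del(e);
	__list_add(e, head, head->next);
}

/* list_move_tail(): unlink @e, reinsert right before @head (back). */
static void list_move_tail(struct list_head *e, struct list_head *head)
{
	__list_del(e);
	__list_add(e, head->prev, head);
}

/* Hypothetical stand-in for struct sched_rt_entity. */
struct toy_task { const char *name; struct list_head run_list; };

static void dump(const char *when, struct list_head *queue)
{
	struct list_head *pos;

	printf("%-18s:", when);
	for (pos = queue->next; pos != queue; pos = pos->next)
		printf(" %s", container_of(pos, struct toy_task, run_list)->name);
	printf("\n");
}

int main(void)
{
	struct list_head queue;
	struct toy_task a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };

	INIT_LIST_HEAD(&queue);
	/* A self-linked node makes __list_del() a no-op, so in this toy
	 * list_move_tail() doubles as a plain enqueue. */
	INIT_LIST_HEAD(&a.run_list);
	INIT_LIST_HEAD(&b.run_list);
	INIT_LIST_HEAD(&c.run_list);

	list_move_tail(&a.run_list, &queue);
	list_move_tail(&b.run_list, &queue);
	list_move_tail(&c.run_list, &queue);
	dump("initial", &queue);		/* A B C */

	list_move_tail(&a.run_list, &queue);	/* head == 0: yield/tick path */
	dump("A moved to tail", &queue);	/* B C A */

	list_move(&c.run_list, &queue);		/* head == 1: equal-prio preempt path */
	dump("C moved to head", &queue);	/* C B A */

	return 0;
}

The on_rt_rq() check in requeue_rt_entity() is what makes the move primitives safe there: an entity that is not queued never reaches list_move(), whereas this toy leans on self-linked nodes surviving __list_del().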

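The other half of the change, check_preempt_equal_prio(), decides on an equal-priority wakeup whether it is worth requeueing the waker at the head of its queue and pushing current away. It bails out early when current is pinned to this CPU, when the waker could migrate and cpupri already knows a CPU that would take it, or when cpupri finds no CPU that would accept current. A stand-alone toy model of that decision, with cpupri_find() and the nr_cpus_allowed tests replaced by hypothetical booleans, makes the three early exits explicit:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: "can_migrate" models nr_cpus_allowed > 1 and
 * "lower_prio_cpu_available" models a successful cpupri_find(). */
struct rt_candidate {
	const char *name;
	bool can_migrate;
	bool lower_prio_cpu_available;
};

/* Mirrors the early exits of check_preempt_equal_prio() above. */
static bool push_current_for(const struct rt_candidate *curr,
			     const struct rt_candidate *p)
{
	if (!curr->can_migrate)
		return false;	/* current is pinned here; pushing is impossible */

	if (p->can_migrate && p->lower_prio_cpu_available)
		return false;	/* p can simply go run somewhere else */

	if (!curr->lower_prio_cpu_available)
		return false;	/* no CPU would take current anyway */

	return true;		/* requeue p at the head and resched current */
}

int main(void)
{
	struct rt_candidate curr = { "curr", true, true };
	struct rt_candidate pinned_waker = { "p", false, false };
	struct rt_candidate mobile_waker = { "q", true, true };

	printf("pinned waker -> push current: %s\n",
	       push_current_for(&curr, &pinned_waker) ? "yes" : "no");	/* yes */
	printf("mobile waker -> push current: %s\n",
	       push_current_for(&curr, &mobile_waker) ? "yes" : "no");	/* no */
	return 0;
}

Requeueing at the head (head = 1) is what makes the subsequent resched_task() effective: the pinned waker sits first in line on this runqueue while the push logic tries to move current elsewhere.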