From b203fd06594991bd2460733a390c8c1959e0bb77 Mon Sep 17 00:00:00 2001
From: Gregory Haskins
Date: Thu, 30 Jul 2009 10:57:23 -0400
Subject: [PATCH]

--- yaml ---
r: 158337
b: refs/heads/master
c: 00aec93d10a051ea64f83eff75d4065a19508ea6
h: refs/heads/master
i: 158335: 5d36b2e7e304e4eecc4e98f628298ccc0ce769ed
v: v3
---
 [refs]                    |  2 +-
 trunk/kernel/sched.c      |  2 +-
 trunk/kernel/sched_fair.c | 10 +++++++---
 trunk/kernel/sched_rt.c   |  7 -------
 4 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/[refs] b/[refs]
index 9f6d68151468..cbddade8e7ea 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3f029d3c6d62068d59301d90c18dbde8ee402107
+refs/heads/master: 00aec93d10a051ea64f83eff75d4065a19508ea6
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 613fee54fc89..475138c42548 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -7927,7 +7927,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
         rq->rd = rd;
 
         cpumask_set_cpu(rq->cpu, rd->span);
-        if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
+        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                 set_rq_online(rq);
 
         spin_unlock_irqrestore(&rq->lock, flags);
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index 652e8bdef9aa..493472984879 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1046,17 +1046,21 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_mask)
+ * hence we need to mask them out (rq->rd->online)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
+
+#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)
+
 static int wake_idle(int cpu, struct task_struct *p)
 {
         struct sched_domain *sd;
         int i;
         unsigned int chosen_wakeup_cpu;
         int this_cpu;
+        struct rq *task_rq = task_rq(p);
 
         /*
          * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
@@ -1089,10 +1093,10 @@ static int wake_idle(int cpu, struct task_struct *p)
         for_each_domain(cpu, sd) {
                 if ((sd->flags & SD_WAKE_IDLE)
                     || ((sd->flags & SD_WAKE_IDLE_FAR)
-                        && !task_hot(p, task_rq(p)->clock, sd))) {
+                        && !task_hot(p, task_rq->clock, sd))) {
                         for_each_cpu_and(i, sched_domain_span(sd),
                                          &p->cpus_allowed) {
-                                if (cpu_active(i) && idle_cpu(i)) {
+                                if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
                                         if (i != task_cpu(p)) {
                                                 schedstat_inc(p,
                                                        se.nr_wakeups_idle);
diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c
index a8f89bc3e5eb..13f728ef5b38 100644
--- a/trunk/kernel/sched_rt.c
+++ b/trunk/kernel/sched_rt.c
@@ -1172,13 +1172,6 @@ static int find_lowest_rq(struct task_struct *task)
         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                 return -1; /* No targets found */
 
-        /*
-         * Only consider CPUs that are usable for migration.
-         * I guess we might want to change cpupri_find() to ignore those
-         * in the first place.
-         */
-        cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
         /*
          * At this point we have built a mask of cpus representing the
          * lowest priority tasks in the system. Now we want to elect
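
The three hunks share one idea: instead of re-checking the global
cpu_active_mask at every wakeup or RT push, callers consult the root
domain's own online mask (rq->rd->online), which rq_attach_root() now
keeps in sync with the active map by gating set_rq_online() on
cpu_active_mask. Below is a minimal standalone C sketch of that lookup;
the unsigned long bitmask stands in for struct cpumask and the struct
layouts are simplified assumptions for illustration, not the kernel's
actual definitions.

/*
 * Standalone sketch of the root-domain "online" lookup introduced above.
 * Illustrative userspace C, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct root_domain {
        unsigned long online;   /* bit i set => CPU i is active in this domain */
};

struct rq {
        int cpu;
        struct root_domain *rd; /* root domain this runqueue belongs to */
};

/* Same shape as the patch's cpu_rd_active(): test rd->online directly. */
static bool cpu_rd_active(int cpu, struct rq *rq)
{
        return rq->rd->online & (1UL << cpu);
}

int main(void)
{
        struct root_domain rd = { .online = 0x0d }; /* CPUs 0, 2, 3 active */
        struct rq rq = { .cpu = 0, .rd = &rd };
        int i;

        /* A wake_idle()-style scan would skip CPUs 1 and 4..7 here. */
        for (i = 0; i < NR_CPUS; i++)
                printf("cpu %d: %s\n", i,
                       cpu_rd_active(i, &rq) ? "eligible" : "skipped");
        return 0;
}

The payoff visible in the sched_rt.c hunk is that this single mask test
makes the extra cpumask_and() against cpu_active_mask in find_lowest_rq()
redundant: the root domain's cpupri state already excludes inactive CPUs.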