From e4230ef7c372fae0a7a0daf8167bf62930073826 Mon Sep 17 00:00:00 2001
From: Andreas Herrmann
Date: Tue, 18 Aug 2009 12:59:28 +0200
Subject: [PATCH]

--- yaml ---
r: 158358
b: refs/heads/master
c: 86548096f252bfe2065f1ea2d301e7319a16375d
h: refs/heads/master
v: v3
---
 [refs]               |  2 +-
 trunk/kernel/sched.c | 18 +++++++++---------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/[refs] b/[refs]
index 886bd38931b9..2a393b72efca 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a2af04cdbb748158043e31799b28c48272081600
+refs/heads/master: 86548096f252bfe2065f1ea2d301e7319a16375d
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index f2c202f66297..b09a41c93ae1 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -8586,6 +8586,13 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 					d->send_covered, d->tmpmask);
 		break;
 #endif
+	case SD_LV_CPU: /* set up physical groups */
+		cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
+		if (!cpumask_empty(d->nodemask))
+			init_sched_build_groups(d->nodemask, cpu_map,
+						&cpu_to_phys_group,
+						d->send_covered, d->tmpmask);
+		break;
 	default:
 		break;
 	}
@@ -8631,15 +8638,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	}
 
 	/* Set up physical groups */
-	for (i = 0; i < nr_node_ids; i++) {
-		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
-		if (cpumask_empty(d.nodemask))
-			continue;
-
-		init_sched_build_groups(d.nodemask, cpu_map,
-					&cpu_to_phys_group,
-					d.send_covered, d.tmpmask);
-	}
+	for (i = 0; i < nr_node_ids; i++)
+		build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
 
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
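
The shape of the change, in brief: the open-coded "set up physical groups" loop in __build_sched_domains() moves behind a new SD_LV_CPU case in build_sched_groups(), so the caller only dispatches per domain level and node. Below is a minimal standalone sketch of that pattern, not the kernel code itself; the names level_t aside, enum sd_level, build_groups(), setup_phys_group(), and NR_NODES are illustrative stand-ins (the real code calls init_sched_build_groups() with cpumasks from struct s_data).

#include <stdio.h>

/* Simplified stand-ins for the sched domain levels touched by the patch. */
enum sd_level { SD_LV_SIBLING, SD_LV_MC, SD_LV_CPU, SD_LV_MAX };

#define NR_NODES 4

/* Illustrative helper; the real code calls init_sched_build_groups(). */
static void setup_phys_group(int node)
{
	printf("building physical group for node %d\n", node);
}

/*
 * After the patch, group construction for every level funnels through
 * one level-dispatching function, mirroring build_sched_groups().
 */
static void build_groups(enum sd_level level, int node)
{
	switch (level) {
	case SD_LV_CPU:		/* set up physical groups */
		setup_phys_group(node);
		break;
	default:		/* other levels handled by their own cases */
		break;
	}
}

int main(void)
{
	/*
	 * The caller loop in __build_sched_domains() shrinks to a
	 * per-node dispatch, as in the second hunk of the diff.
	 */
	for (int node = 0; node < NR_NODES; node++)
		build_groups(SD_LV_CPU, node);
	return 0;
}

Keeping the per-level construction inside one switch means every call site uses the same entry point, which is what lets the second hunk collapse nine lines into two.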