From d6ead608c75f71c9500c23488d46fb733cd787e0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 16 Apr 2010 14:59:29 +0200
Subject: [PATCH]

--- yaml ---
r: 191412
b: refs/heads/master
c: 669c55e9f99b90e46eaa0f98a67ec53d46dc969a
h: refs/heads/master
v: v3
---
 [refs]                      |  2 +-
 trunk/include/linux/sched.h |  1 +
 trunk/kernel/sched.c        |  3 +++
 trunk/kernel/sched_fair.c   | 12 +++++-------
 4 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/[refs] b/[refs]
index b2f104b23266..22e2f31ba035 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 74f5187ac873042f502227701ed1727e7c5fbfa9
+refs/heads/master: 669c55e9f99b90e46eaa0f98a67ec53d46dc969a
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index e3e900f318d7..dfea40574b2a 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -960,6 +960,7 @@ struct sched_domain {
 	char *name;
 #endif
 
+	unsigned int span_weight;
 	/*
	 * Span of all CPUs in this domain.
	 *
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 0cc913a8554f..4956ed092838 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -6271,6 +6271,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
 
+	for (tmp = sd; tmp; tmp = tmp->parent)
+		tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
+
 	/* Remove the sched domains which do not contribute to scheduling. */
 	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index 88d3053ac7c2..0a413c7e3ab8 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1508,9 +1508,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 		 * Pick the largest domain to update shares over
 		 */
 		tmp = sd;
-		if (affine_sd && (!tmp ||
-			  cpumask_weight(sched_domain_span(affine_sd)) >
-			  cpumask_weight(sched_domain_span(sd))))
+		if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
 			tmp = affine_sd;
 
 		if (tmp) {
@@ -1554,10 +1552,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 
 		/* Now try balancing at a lower domain level of new_cpu */
 		cpu = new_cpu;
-		weight = cpumask_weight(sched_domain_span(sd));
+		weight = sd->span_weight;
 		sd = NULL;
 		for_each_domain(cpu, tmp) {
-			if (weight <= cpumask_weight(sched_domain_span(tmp)))
+			if (weight <= tmp->span_weight)
 				break;
 			if (tmp->flags & sd_flag)
 				sd = tmp;
@@ -2243,7 +2241,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
-	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long weight = sd->span_weight;
 	unsigned long smt_gain = sd->smt_gain;
 
 	smt_gain /= weight;
@@ -2276,7 +2274,7 @@ unsigned long scale_rt_power(int cpu)
 
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
-	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long weight = sd->span_weight;
 	unsigned long power = SCHED_LOAD_SCALE;
 	struct sched_group *sdg = sd->groups;
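
The patch above caches cpumask_weight(sched_domain_span(sd)) in a new sd->span_weight field at cpu_attach_domain() time, so the wakeup and load-balance hot paths read a stored integer instead of recounting mask bits. The userspace C sketch below only illustrates that caching pattern; it is not kernel code, and the struct domain, mask_weight() and attach_domain() names are hypothetical stand-ins for sched_domain, cpumask_weight() and cpu_attach_domain().

```c
/* Illustrative sketch of the patch's idea: precompute a mask weight once
 * at attach time and reuse the cached value on hot paths. Names are
 * hypothetical; this is not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>

struct domain {
	struct domain *parent;
	uint64_t span;			/* bitmask of CPUs in this domain */
	unsigned int span_weight;	/* cached popcount of span */
};

/* Stand-in for cpumask_weight(): count the set bits in a mask. */
static unsigned int mask_weight(uint64_t mask)
{
	unsigned int w = 0;

	for (; mask; mask &= mask - 1)
		w++;
	return w;
}

/* Analogue of the cpu_attach_domain() hunk: walk up the hierarchy and
 * fill in span_weight once, instead of recomputing it on every use. */
static void attach_domain(struct domain *d)
{
	struct domain *tmp;

	for (tmp = d; tmp; tmp = tmp->parent)
		tmp->span_weight = mask_weight(tmp->span);
}

int main(void)
{
	struct domain parent = { .parent = NULL,    .span = 0xff };	/* 8 CPUs */
	struct domain child  = { .parent = &parent, .span = 0x0f };	/* 4 CPUs */

	attach_domain(&child);

	/* Hot-path style comparison now uses the cached weights, mirroring
	 * "affine_sd->span_weight > sd->span_weight" in the patch. */
	if (parent.span_weight > child.span_weight)
		printf("parent spans more CPUs (%u > %u)\n",
		       parent.span_weight, child.span_weight);
	return 0;
}
```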