From 43e3089561cba9b1ca94c22768d59b54557399cf Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 1 Sep 2009 10:34:35 +0200
Subject: [PATCH] --- yaml ---

r: 158367
b: refs/heads/master
c: a52bfd73589eaf88d9c95ad2c1de0b38a6b27972
h: refs/heads/master
i:
  158365: 19a29a5424d17ce3123b2981cc89770bf7abd3fb
  158363: 80438ca781c36697e5860fd3a237f7810099a4f5
  158359: 56a37c3475dae98ff7a2808d29b9a8f0b3b411d6
  158351: 8d5e9336bd7cba73552b498dd62ed38945592c3b
  158335: 5d36b2e7e304e4eecc4e98f628298ccc0ce769ed
v: v3
---
 [refs]                         | 2 +-
 trunk/include/linux/sched.h    | 1 +
 trunk/include/linux/topology.h | 1 +
 trunk/kernel/sched.c           | 8 +++++++-
 4 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index b8840777da51..47cb3abb0934 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cc9fba7d7672fa3ed58d9d9ecb6c45b1351c29a6
+refs/heads/master: a52bfd73589eaf88d9c95ad2c1de0b38a6b27972
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 651dded25720..9c81c921acb3 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -921,6 +921,7 @@ struct sched_domain {
 	unsigned int newidle_idx;
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
+	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	enum sched_domain_level level;
 
diff --git a/trunk/include/linux/topology.h b/trunk/include/linux/topology.h
index 7402c1a27c4f..6203ae5067ce 100644
--- a/trunk/include/linux/topology.h
+++ b/trunk/include/linux/topology.h
@@ -99,6 +99,7 @@ int arch_update_cpu_topology(void);
 				| SD_SHARE_CPUPOWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
+	.smt_gain		= 1178,	/* 15% */	\
 }
 #endif
 #endif /* CONFIG_SCHED_SMT */
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index ecb4a47d4214..55112261027b 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -8523,9 +8523,15 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 		weight = cpumask_weight(sched_domain_span(sd));
 		/*
 		 * SMT siblings share the power of a single core.
+		 * Usually multiple threads get a better yield out of
+		 * that one core than a single thread would have,
+		 * reflect that in sd->smt_gain.
 		 */
-		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1)
+		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+			power *= sd->smt_gain;
 			power /= weight;
+			power >>= SCHED_LOAD_SHIFT;
+		}
 		sg_inc_cpu_power(sd->groups, power);
 		return;
 	}
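
---

What the kernel/sched.c hunk computes, in fixed point: the power of an
SMT core (SCHED_LOAD_SCALE == 1024 for one full core) is multiplied by
sd->smt_gain (1178, roughly 1.15 in 10-bit fixed point, hence the
"15%" comment), split across the siblings, then shifted back down by
SCHED_LOAD_SHIFT. The sketch below is a standalone userspace
reproduction of that arithmetic, not kernel code; it assumes
SCHED_LOAD_SHIFT == 10, as in kernel headers of this era, and the
main() harness is purely illustrative.

#include <stdio.h>

/* Mirror the kernel constants of this era (assumption, see above). */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)

int main(void)
{
	unsigned long power = SCHED_LOAD_SCALE;	/* one full core: 1024 */
	unsigned long smt_gain = 1178;		/* ~1.15 fixed point */
	unsigned long weight = 2;		/* two SMT siblings */

	/* Same three steps as the patched init_sched_groups_power(). */
	power *= smt_gain;			/* 1024 * 1178 = 1206272 */
	power /= weight;			/* 603136 per sibling */
	power >>= SCHED_LOAD_SHIFT;		/* 589 per sibling */

	printf("per-sibling: %lu, core total: %lu\n", power, power * weight);
	/* prints: per-sibling: 589, core total: 1178 (~= 1.15 * 1024) */
	return 0;
}

Note the ordering: multiplying before dividing preserves precision,
and the final shift renormalizes so the siblings sum to smt_gain
(1178) instead of SCHED_LOAD_SCALE (1024) as before the patch, i.e.
the shared core is credited with ~15% more capacity than one thread
running alone would get.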