diff --git a/[refs] b/[refs]
index e6704df86fea..0c3004d1c69d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d5679bd11916eba5c8ee9033003e1a5ce56ece9a
+refs/heads/master: 7a09b1a27b1e5a4957e4af9951420fea02c44fba
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 94b9d11e3312..c1b8b3031eb2 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -509,6 +509,14 @@ struct root_domain {
 #ifdef CONFIG_SMP
 	struct cpupri cpupri;
 #endif
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+	/*
+	 * Preferred wake up cpu nominated by sched_mc balance that will be
+	 * used when most cpus are idle in the system indicating overall very
+	 * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
+	 */
+	unsigned int sched_mc_preferred_wakeup_cpu;
+#endif
 };
 
 /*
@@ -3384,6 +3392,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 	if (this == group_leader && group_leader != group_min) {
 		*imbalance = min_load_per_task;
+		if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+			cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+				first_cpu(group_leader->cpumask);
+		}
 		return group_min;
 	}
 #endif
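
For readers who want to see the nomination logic in isolation, the sketch below mirrors the hunk added to find_busiest_group(): when sched_mc_power_savings is at or above POWERSAVINGS_BALANCE_WAKEUP, the first CPU of the nominated group_leader is recorded as the preferred wakeup CPU. This is a standalone illustration under assumed simplified types, not kernel code: struct toy_group, first_cpu_of() and nominate_wakeup_cpu() are made-up stand-ins for the kernel's sched_group, first_cpu(cpumask) and the root_domain field the patch adds.

/*
 * Standalone illustration (not kernel code) of the nomination added to
 * find_busiest_group() by this patch. Toy types stand in for the kernel's
 * sched_group/cpumask; only the decision logic is kept.
 */
#include <stdio.h>

#define POWERSAVINGS_BALANCE_WAKEUP 2

struct toy_group {
	const char *name;
	int cpus[4];		/* CPUs belonging to this group */
};

/* Stand-in for first_cpu(group->cpumask): first CPU of the group. */
static int first_cpu_of(const struct toy_group *g)
{
	return g->cpus[0];
}

/*
 * Stand-in for the added hunk: at the highest power-savings level, remember
 * the first CPU of the nominated "leader" group so that later wakeups can be
 * biased toward it instead of spreading work onto otherwise idle packages.
 */
static int nominate_wakeup_cpu(int sched_mc_power_savings,
			       const struct toy_group *group_leader,
			       int *preferred_wakeup_cpu)
{
	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
		*preferred_wakeup_cpu = first_cpu_of(group_leader);
		return 1;	/* nomination recorded */
	}
	return 0;		/* power savings level too low, nothing recorded */
}

int main(void)
{
	struct toy_group leader = { "package0", { 0, 1, 2, 3 } };
	int preferred = -1;

	if (nominate_wakeup_cpu(2, &leader, &preferred))
		printf("preferred wakeup cpu: %d\n", preferred);
	else
		printf("no preferred wakeup cpu nominated\n");
	return 0;
}

The point the patch makes is that the nomination only happens at sched_mc_power_savings level 2 (POWERSAVINGS_BALANCE_WAKEUP), so balancing behaviour at levels 0 and 1 is left unchanged.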