From d8c3892f8f95ee8c686ae4e24550627bca288c7b Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Thu, 1 Dec 2011 17:07:33 -0800
Subject: [PATCH]

--- yaml ---
r: 277387
b: refs/heads/master
c: 69e1e811dcc436a6b129dbef273ad9ec22d095ce
h: refs/heads/master
i:
  277385: 37383a384bbbdfd47971b7217e531a2011c68663
  277383: 8b680d31440f914ce670db10b5b98a6680a0d9e3
v: v3
---
 [refs]                         |  2 +-
 trunk/include/linux/sched.h    |  6 ++++++
 trunk/kernel/sched/core.c      |  1 +
 trunk/kernel/sched/fair.c      | 31 +++++++++++++++++++++++++++++++
 trunk/kernel/sched/sched.h     |  1 +
 trunk/kernel/time/tick-sched.c |  9 +++++++++
 6 files changed, 49 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 79fd7e43c2b6..f932634c9e5d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1c792db7f7957e2e34b9a164f08200e36a25dfd0
+refs/heads/master: 69e1e811dcc436a6b129dbef273ad9ec22d095ce
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 8db17b7622ec..295666cb5b86 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
 #else
 static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
 /*
@@ -901,6 +903,10 @@ struct sched_group_power {
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
 };
 
 struct sched_group {
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 7f1da77b83f3..699ff1499a8a 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -6024,6 +6024,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 		return;
 
 	update_group_power(sd, cpu);
+	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
 }
 
 int __weak arch_sd_sibling_asym_packing(void)
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index 50c06b0e9fab..e050563e97a4 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -4901,6 +4901,36 @@ static void nohz_balancer_kick(int cpu)
 	return;
 }
 
+static inline void set_cpu_sd_state_busy(void)
+{
+	struct sched_domain *sd;
+	int cpu = smp_processor_id();
+
+	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+		return;
+	clear_bit(NOHZ_IDLE, nohz_flags(cpu));
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd)
+		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+	rcu_read_unlock();
+}
+
+void set_cpu_sd_state_idle(void)
+{
+	struct sched_domain *sd;
+	int cpu = smp_processor_id();
+
+	if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
+		return;
+	set_bit(NOHZ_IDLE, nohz_flags(cpu));
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd)
+		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+	rcu_read_unlock();
+}
+
 /*
  * This routine will try to nominate the ilb (idle load balancing)
  * owner among the cpus whose ticks are stopped. ilb owner will do the idle
@@ -5135,6 +5165,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	 * We may be recently in ticked or tickless idle mode. At the first
 	 * busy tick after returning from idle, we will update the busy stats.
 	 */
+	set_cpu_sd_state_busy();
 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 		cpumask_test_and_clear_cpu(cpu, nohz.idle_cpus_mask);
diff --git a/trunk/kernel/sched/sched.h b/trunk/kernel/sched/sched.h
index cf7d02662bc2..91810f0ee3af 100644
--- a/trunk/kernel/sched/sched.h
+++ b/trunk/kernel/sched/sched.h
@@ -1069,6 +1069,7 @@ extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 enum rq_nohz_flag_bits {
 	NOHZ_TICK_STOPPED,
 	NOHZ_BALANCE_KICK,
+	NOHZ_IDLE,
 };
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c
index 40420644d0ba..31cc06163ed5 100644
--- a/trunk/kernel/time/tick-sched.c
+++ b/trunk/kernel/time/tick-sched.c
@@ -296,6 +296,15 @@ void tick_nohz_stop_sched_tick(int inidle)
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
+	/*
+	 * Update the idle state in the scheduler domain hierarchy
+	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
+	 * State will be updated to busy during the first busy tick after
+	 * exiting idle.
+	 */
+	if (inidle)
+		set_cpu_sd_state_idle();
+
 	/*
 	 * Call to tick_nohz_start_idle stops the last_update_time from being
 	 * updated. Thus, it must not be called in the event we are called from
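
The accounting pattern above is small but easy to get wrong: the per-cpu
NOHZ_IDLE bit is what keeps the atomic_inc()/atomic_dec() pairs balanced when
a CPU goes through several idle entries or busy ticks in a row. Below is a
minimal user-space sketch of that idempotent busy/idle counter, using C11
atomics instead of the kernel's atomic_t; all toy_* names are made up for
illustration and none of this is kernel API.

	/*
	 * Sketch of the nr_busy_cpus idea: each group keeps an atomic count
	 * of non-idle CPUs, and a per-CPU "idle" flag makes the inc/dec
	 * transitions idempotent, the way NOHZ_IDLE guards
	 * set_cpu_sd_state_busy()/set_cpu_sd_state_idle() in the patch.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_group {
		atomic_int nr_busy_cpus;	/* analogous to sgp->nr_busy_cpus */
	};

	struct toy_cpu {
		bool idle;			/* analogous to the NOHZ_IDLE bit */
		struct toy_group *group;
	};

	static void toy_cpu_enter_idle(struct toy_cpu *cpu)
	{
		if (cpu->idle)			/* already counted as idle: no-op */
			return;
		cpu->idle = true;
		atomic_fetch_sub(&cpu->group->nr_busy_cpus, 1);
	}

	static void toy_cpu_enter_busy(struct toy_cpu *cpu)
	{
		if (!cpu->idle)			/* already counted as busy: no-op */
			return;
		cpu->idle = false;
		atomic_fetch_add(&cpu->group->nr_busy_cpus, 1);
	}

	int main(void)
	{
		/* start fully busy, like the group_weight init in core.c */
		struct toy_group g = { .nr_busy_cpus = 2 };
		struct toy_cpu c0 = { .idle = false, .group = &g };
		struct toy_cpu c1 = { .idle = false, .group = &g };

		toy_cpu_enter_idle(&c0);
		toy_cpu_enter_idle(&c0);	/* idempotent: counter drops only once */
		printf("busy cpus: %d\n", atomic_load(&g.nr_busy_cpus));	/* 1 */

		toy_cpu_enter_busy(&c0);
		toy_cpu_enter_idle(&c1);
		printf("busy cpus: %d\n", atomic_load(&g.nr_busy_cpus));	/* 1 */
		return 0;
	}

In the kernel the increment/decrement is applied to every level of the
sched_domain hierarchy under rcu_read_lock(); the sketch keeps a single flat
group only to stay short.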