From 6db5580cde5ade33099498218e507f216824360b Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Tue, 27 Dec 2011 11:27:16 +0100
Subject: [PATCH]

--- yaml ---
r: 280624
b: refs/heads/master
c: d68bddb7329a4d47d950d6b0745a7e274d230ed4
h: refs/heads/master
v: v3
---
 [refs]                                 |  2 +-
 trunk/arch/s390/include/asm/topology.h |  2 ++
 trunk/arch/s390/kernel/smp.c           |  2 ++
 trunk/arch/s390/kernel/topology.c      | 34 ++++++++++++++++++++------
 4 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/[refs] b/[refs]
index aa842f9850e1..ee12e50f08a5 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c5328901aa1db134325607d65527742d8be07f7d
+refs/heads/master: d68bddb7329a4d47d950d6b0745a7e274d230ed4

diff --git a/trunk/arch/s390/include/asm/topology.h b/trunk/arch/s390/include/asm/topology.h
index 7016dd7b6bc4..0837de80c351 100644
--- a/trunk/arch/s390/include/asm/topology.h
+++ b/trunk/arch/s390/include/asm/topology.h
@@ -35,11 +35,13 @@ int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
 
 #else /* CONFIG_SCHED_BOOK */
 
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
 
 #endif /* CONFIG_SCHED_BOOK */

diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c
index 109e7422bb20..8aba77df68a9 100644
--- a/trunk/arch/s390/kernel/smp.c
+++ b/trunk/arch/s390/kernel/smp.c
@@ -867,6 +867,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 		if (!rc) {
 			smp_cpu_state[cpu] = CPU_STATE_STANDBY;
 			cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+			topology_expect_change();
 		}
 	}
 	break;
@@ -876,6 +877,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
 		if (!rc) {
 			smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
 			cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+			topology_expect_change();
 		}
 	}
 	break;

diff --git a/trunk/arch/s390/kernel/topology.c b/trunk/arch/s390/kernel/topology.c
index 2abad3014928..e06fb852d386 100644
--- a/trunk/arch/s390/kernel/topology.c
+++ b/trunk/arch/s390/kernel/topology.c
@@ -31,7 +31,6 @@ struct mask_info {
 static int topology_enabled = 1;
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
@@ -297,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored)
 	set_topology_timer();
 }
 
+static struct timer_list topology_timer =
+	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
 static void set_topology_timer(void)
 {
-	topology_timer.function = topology_timer_fn;
-	topology_timer.data = 0;
-	topology_timer.expires = jiffies + 60 * HZ;
-	add_timer(&topology_timer);
+	if (atomic_add_unless(&topology_poll, -1, 0))
+		mod_timer(&topology_timer, jiffies + HZ / 10);
+	else
+		mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY)
+		return;
+	/* This is racy, but it doesn't matter since it is just a heuristic.
+	 * Worst case is that we poll in a higher frequency for a bit longer.
+	 */
+	if (atomic_read(&topology_poll) > 60)
+		return;
+	atomic_add(60, &topology_poll);
+	set_topology_timer();
 }
 
 static int __init early_parse_topology(char *p)
@@ -379,8 +396,10 @@ static ssize_t dispatching_store(struct sysdev_class *dev,
 	if (cpu_management == val)
 		goto out;
 	rc = topology_set_cpu_management(val);
-	if (!rc)
-		cpu_management = val;
+	if (rc)
+		goto out;
+	cpu_management = val;
+	topology_expect_change();
 out:
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
@@ -438,7 +457,6 @@ static int __init topology_init(void)
 		topology_update_polarization_simple();
 		goto out;
 	}
-	init_timer_deferrable(&topology_timer);
 	set_topology_timer();
 out:
 	update_cpu_core_map();