workqueue: Convert to state machine callbacks
Get rid of the prio ordering of the separate notifiers and use a proper state
callback pair.
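
Illustrative note, not part of the original patch: the new workqueue_prepare_cpu(), workqueue_online_cpu() and workqueue_offline_cpu() callbacks are wired into the static cpuhp state tables in kernel/cpu.c below. A subsystem doing the same conversion with a dynamically allocated state would register an equivalent startup/teardown pair via cpuhp_setup_state(); a minimal sketch, where subsys_online_cpu, subsys_offline_cpu and the "subsys:online" name are made-up placeholders:

        /* needs <linux/cpuhotplug.h>; cpuhp_setup_state() also invokes the
         * startup callback on every CPU that is already online */
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online",
                                subsys_online_cpu,      /* startup: CPU coming up */
                                subsys_offline_cpu);    /* teardown: CPU going down */
        if (ret < 0)
                return ret;

Startup callbacks run in state order while a CPU is brought up and teardown callbacks run in reverse order while it is taken down, so explicit state positions replace the CPU_PRI_WORKQUEUE_UP/CPU_PRI_WORKQUEUE_DOWN notifier priorities removed below.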

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153335.197083890@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Thomas Gleixner authored and Ingo Molnar committed Jul 14, 2016
1 parent c6a84da commit 7ee681b
Showing 5 changed files with 61 additions and 74 deletions.
9 changes: 0 additions & 9 deletions include/linux/cpu.h
@@ -55,15 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
-/*
- * CPU notifier priorities.
- */
-enum {
-        /* bring up workqueues before normal notifiers and down after */
-        CPU_PRI_WORKQUEUE_UP = 5,
-        CPU_PRI_WORKQUEUE_DOWN = -5,
-};
-
 #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
 #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
 #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
2 changes: 2 additions & 0 deletions include/linux/cpuhotplug.h
@@ -12,6 +12,7 @@ enum cpuhp_state {
         CPUHP_PERF_BFIN,
         CPUHP_PERF_POWER,
         CPUHP_PERF_SUPERH,
+        CPUHP_WORKQUEUE_PREP,
         CPUHP_NOTIFY_PREPARE,
         CPUHP_BRINGUP_CPU,
         CPUHP_AP_IDLE_DEAD,
@@ -49,6 +50,7 @@ enum cpuhp_state {
         CPUHP_AP_PERF_S390_SF_ONLINE,
         CPUHP_AP_PERF_ARM_CCI_ONLINE,
         CPUHP_AP_PERF_ARM_CCN_ONLINE,
+        CPUHP_AP_WORKQUEUE_ONLINE,
         CPUHP_AP_NOTIFY_ONLINE,
         CPUHP_AP_ONLINE_DYN,
         CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
6 changes: 6 additions & 0 deletions include/linux/workqueue.h
@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu);
 static inline void wq_watchdog_touch(int cpu) { }
 #endif /* CONFIG_WQ_WATCHDOG */
 
+#ifdef CONFIG_SMP
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+#endif
+
 #endif
10 changes: 10 additions & 0 deletions kernel/cpu.c
@@ -1185,6 +1185,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                 .startup = perf_event_init_cpu,
                 .teardown = perf_event_exit_cpu,
         },
+        [CPUHP_WORKQUEUE_PREP] = {
+                .name = "workqueue prepare",
+                .startup = workqueue_prepare_cpu,
+                .teardown = NULL,
+        },
         /*
          * Preparatory and dead notifiers. Will be replaced once the notifiers
          * are converted to states.
@@ -1267,6 +1272,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                 .startup = perf_event_init_cpu,
                 .teardown = perf_event_exit_cpu,
         },
+        [CPUHP_AP_WORKQUEUE_ONLINE] = {
+                .name = "workqueue online",
+                .startup = workqueue_online_cpu,
+                .teardown = workqueue_offline_cpu,
+        },
 
         /*
          * Online/down_prepare notifiers. Will be removed once the notifiers
108 changes: 43 additions & 65 deletions kernel/workqueue.c
@@ -4611,84 +4611,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
                                                   pool->attrs->cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-                                     unsigned long action,
-                                     void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+        struct worker_pool *pool;
+
+        for_each_cpu_worker_pool(pool, cpu) {
+                if (pool->nr_workers)
+                        continue;
+                if (!create_worker(pool))
+                        return -ENOMEM;
+        }
+        return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-        int cpu = (unsigned long)hcpu;
         struct worker_pool *pool;
         struct workqueue_struct *wq;
         int pi;
 
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_UP_PREPARE:
-                for_each_cpu_worker_pool(pool, cpu) {
-                        if (pool->nr_workers)
-                                continue;
-                        if (!create_worker(pool))
-                                return NOTIFY_BAD;
-                }
-                break;
-
-        case CPU_DOWN_FAILED:
-        case CPU_ONLINE:
-                mutex_lock(&wq_pool_mutex);
+        mutex_lock(&wq_pool_mutex);
 
-                for_each_pool(pool, pi) {
-                        mutex_lock(&pool->attach_mutex);
+        for_each_pool(pool, pi) {
+                mutex_lock(&pool->attach_mutex);
 
-                        if (pool->cpu == cpu)
-                                rebind_workers(pool);
-                        else if (pool->cpu < 0)
-                                restore_unbound_workers_cpumask(pool, cpu);
+                if (pool->cpu == cpu)
+                        rebind_workers(pool);
+                else if (pool->cpu < 0)
+                        restore_unbound_workers_cpumask(pool, cpu);
 
-                        mutex_unlock(&pool->attach_mutex);
-                }
+                mutex_unlock(&pool->attach_mutex);
+        }
 
-                /* update NUMA affinity of unbound workqueues */
-                list_for_each_entry(wq, &workqueues, list)
-                        wq_update_unbound_numa(wq, cpu, true);
+        /* update NUMA affinity of unbound workqueues */
+        list_for_each_entry(wq, &workqueues, list)
+                wq_update_unbound_numa(wq, cpu, true);
 
-                mutex_unlock(&wq_pool_mutex);
-                break;
-        }
-        return NOTIFY_OK;
+        mutex_unlock(&wq_pool_mutex);
+        return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-                                       unsigned long action,
-                                       void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-        int cpu = (unsigned long)hcpu;
         struct work_struct unbind_work;
         struct workqueue_struct *wq;
 
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_DOWN_PREPARE:
-                /* unbinding per-cpu workers should happen on the local CPU */
-                INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-                queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-                /* update NUMA affinity of unbound workqueues */
-                mutex_lock(&wq_pool_mutex);
-                list_for_each_entry(wq, &workqueues, list)
-                        wq_update_unbound_numa(wq, cpu, false);
-                mutex_unlock(&wq_pool_mutex);
-
-                /* wait for per-cpu unbinding to finish */
-                flush_work(&unbind_work);
-                destroy_work_on_stack(&unbind_work);
-                break;
-        }
-        return NOTIFY_OK;
+        /* unbinding per-cpu workers should happen on the local CPU */
+        INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+        queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+        /* update NUMA affinity of unbound workqueues */
+        mutex_lock(&wq_pool_mutex);
+        list_for_each_entry(wq, &workqueues, list)
+                wq_update_unbound_numa(wq, cpu, false);
+        mutex_unlock(&wq_pool_mutex);
+
+        /* wait for per-cpu unbinding to finish */
+        flush_work(&unbind_work);
+        destroy_work_on_stack(&unbind_work);
+        return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -5490,9 +5471,6 @@ static int __init init_workqueues(void)
 
         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
         wq_numa_init();
 
         /* initialize CPU pools */
