Skip to content

Commit

Permalink
stop_machine: Move 'cpu_stopper_task' and 'stop_cpus_work' into 'struct cpu_stopper'

Browse files Browse the repository at this point in the history

Multiple DEFINE_PER_CPU's do not make sense, move all the per-cpu
variables into 'struct cpu_stopper'.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Cc: der.herr@hofr.at
Cc: paulmck@linux.vnet.ibm.com
Cc: riel@redhat.com
Cc: viro@ZenIV.linux.org.uk
Link: http://lkml.kernel.org/r/20150630012944.GA23924@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
  • Loading branch information
Oleg Nesterov authored and Ingo Molnar committed Aug 3, 2015
1 parent fe32d3c commit 02cb7aa
Showing 1 changed file with 9 additions and 8 deletions.
17 changes: 9 additions & 8 deletions kernel/stop_machine.c
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,16 @@ struct cpu_stop_done {

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
struct task_struct *thread; /* stopper kthread for this CPU (replaces the old per-cpu cpu_stopper_task) */

spinlock_t lock; /* protects 'enabled' and 'works' */
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */

struct cpu_stop_work stop_work; /* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false;

/*
Expand Down Expand Up @@ -74,15 +77,14 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
struct task_struct *p = per_cpu(cpu_stopper_task, cpu);

unsigned long flags;

spin_lock_irqsave(&stopper->lock, flags);

if (stopper->enabled) {
list_add_tail(&work->list, &stopper->works);
wake_up_process(p);
wake_up_process(stopper->thread);
} else
cpu_stop_signal_done(work->done, false);

Expand Down Expand Up @@ -293,7 +295,6 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
Expand All @@ -304,7 +305,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,

/* initialize works and done */
for_each_cpu(cpu, cpumask) {
work = &per_cpu(stop_cpus_work, cpu);
work = &per_cpu(cpu_stopper.stop_work, cpu);
work->fn = fn;
work->arg = arg;
work->done = done;
Expand All @@ -317,7 +318,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
*/
lg_global_lock(&stop_cpus_lock);
for_each_cpu(cpu, cpumask)
cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
cpu_stop_queue_work(cpu, &per_cpu(cpu_stopper.stop_work, cpu));
lg_global_unlock(&stop_cpus_lock);
}

Expand Down Expand Up @@ -458,7 +459,7 @@ extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
Expand All @@ -485,7 +486,7 @@ static void cpu_stop_unpark(unsigned int cpu)
}

static struct smp_hotplug_thread cpu_stop_threads = {
.store = &cpu_stopper_task,
.store = &cpu_stopper.thread,
.thread_should_run = cpu_stop_should_run,
.thread_fn = cpu_stopper_thread,
.thread_comm = "migration/%u",
Expand Down

0 comments on commit 02cb7aa

Please sign in to comment.