sched/hotplug: Convert cpu_[in]active notifiers to state machine
Now that we have reduced everything to single notifiers, it's simple to move them
into the hotplug state machine space.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: rt@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner committed May 6, 2016
1 parent c6d2c74 commit 40190a7
Showing 5 changed files with 30 additions and 60 deletions.
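
Editor's note: the conversion pattern used here is the general one for retiring a CPU notifier. The ONLINE/DOWN_PREPARE switch statement becomes a pair of per-CPU callbacks attached to a hotplug state. A minimal sketch of the dynamically allocated variant is below; the foo_* names are hypothetical, and the scheduler state added by this commit is instead wired into the static cpuhp_ap_states[] table, as the kernel/cpu.c hunk further down shows.

#include <linux/cpuhotplug.h>

/* Hypothetical per-CPU callbacks; they take over the work of the old
 * CPU_ONLINE/CPU_DOWN_FAILED and CPU_DOWN_PREPARE notifier cases. */
static int foo_cpu_online(unsigned int cpu)
{
        return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
        return 0;
}

/* Called from the driver's init path.  With CPUHP_AP_ONLINE_DYN the core
 * picks a free state, runs foo_cpu_online() on every CPU that is already
 * up, and invokes the callbacks for every later hotplug operation. */
static int foo_hotplug_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
                                foo_cpu_online, foo_cpu_offline);
        return ret < 0 ? ret : 0;
}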
12 changes: 0 additions & 12 deletions include/linux/cpu.h
@@ -59,18 +59,6 @@ struct notifier_block;
  * CPU notifier priorities.
  */
 enum {
-        /*
-         * SCHED_ACTIVE marks a cpu which is coming up active during
-         * CPU_ONLINE and CPU_DOWN_FAILED and must be the first notifier. Is
-         * also cpuset according to cpu_active mask right after activating the
-         * cpu. During CPU_DOWN_PREPARE, SCHED_INACTIVE reversed the operation.
-         *
-         * This ordering guarantees consistent cpu_active mask and
-         * migration behavior to all cpu notifiers.
-         */
-        CPU_PRI_SCHED_ACTIVE = INT_MAX,
-        CPU_PRI_SCHED_INACTIVE = INT_MIN,
-
         /* migration should happen before other stuff but after perf */
         CPU_PRI_PERF = 20,
         CPU_PRI_MIGRATION = 10,
1 change: 1 addition & 0 deletions include/linux/cpuhotplug.h
@@ -13,6 +13,7 @@ enum cpuhp_state {
         CPUHP_AP_ONLINE,
         CPUHP_TEARDOWN_CPU,
         CPUHP_AP_ONLINE_IDLE,
+        CPUHP_AP_ACTIVE,
         CPUHP_AP_SMPBOOT_THREADS,
         CPUHP_AP_NOTIFY_ONLINE,
         CPUHP_AP_ONLINE_DYN,
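
The placement of the new state matters: AP states run in ascending enum order when a CPU comes up and in descending order when it goes down, so putting CPUHP_AP_ACTIVE directly after CPUHP_AP_ONLINE_IDLE keeps the ordering that the removed CPU_PRI_SCHED_ACTIVE/CPU_PRI_SCHED_INACTIVE priorities used to enforce relative to the legacy notifier chain. A rough orientation sketch, written as a comment:

/*
 * Bringup order implied by the enum placement above (reversed on teardown):
 *
 *   CPUHP_AP_ONLINE_IDLE      CPU has reached the idle loop
 *   CPUHP_AP_ACTIVE           sched_cpu_activate()            <-- new state
 *   CPUHP_AP_SMPBOOT_THREADS  unpark per-CPU kthreads
 *   CPUHP_AP_NOTIFY_ONLINE    legacy CPU_ONLINE notifier chain
 */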
2 changes: 2 additions & 0 deletions include/linux/sched.h
@@ -373,6 +373,8 @@ extern void trap_init(void);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
 extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
 
 extern void sched_show_task(struct task_struct *p);
 
8 changes: 6 additions & 2 deletions kernel/cpu.c
@@ -923,8 +923,6 @@ void cpuhp_online_idle(enum cpuhp_state state)
 
         st->state = CPUHP_AP_ONLINE_IDLE;
 
-        /* The cpu is marked online, set it active now */
-        set_cpu_active(cpu, true);
         /* Unpark the stopper thread and the hotplug thread of this cpu */
         stop_machine_unpark(cpu);
         kthread_unpark(st->thread);
@@ -1259,6 +1257,12 @@ static struct cpuhp_step cpuhp_ap_states[] = {
         [CPUHP_AP_ONLINE] = {
                 .name = "ap:online",
         },
+        /* First state is scheduler control. Interrupts are enabled */
+        [CPUHP_AP_ACTIVE] = {
+                .name = "sched:active",
+                .startup = sched_cpu_activate,
+                .teardown = sched_cpu_deactivate,
+        },
         /* Handle smpboot threads park/unpark */
         [CPUHP_AP_SMPBOOT_THREADS] = {
                 .name = "smpboot:threads",
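
For reference, a cpuhp_ap_states[] entry is nothing more than a named pair of callbacks. A minimal sketch, assuming the 4.7-era struct cpuhp_step layout (the later split into single/multi-instance callbacks does not exist yet); the cpuhp_step_invoke() helper is hypothetical and only illustrates the calling convention:

/* Sketch only: the real definition lives in kernel/cpu.c. */
struct cpuhp_step {
        const char *name;
        int (*startup)(unsigned int cpu);
        int (*teardown)(unsigned int cpu);
        bool skip_onerr;
        bool cant_stop;
};

/* Hypothetical helper: bringup runs startup, teardown runs teardown, a NULL
 * callback is skipped.  For CPUHP_AP_ACTIVE this ends up in
 * sched_cpu_activate() / sched_cpu_deactivate(). */
static int cpuhp_step_invoke(struct cpuhp_step *step, unsigned int cpu, bool bringup)
{
        int (*cb)(unsigned int) = bringup ? step->startup : step->teardown;

        return cb ? cb(cpu) : 0;
}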
67 changes: 21 additions & 46 deletions kernel/sched/core.c
@@ -6634,9 +6634,6 @@ static void sched_domains_numa_masks_set(unsigned int cpu)
         int node = cpu_to_node(cpu);
         int i, j;
 
-        if (!sched_smp_initialized)
-                return;
-
         for (i = 0; i < sched_domains_numa_levels; i++) {
                 for (j = 0; j < nr_node_ids; j++) {
                         if (node_distance(j, node) <= sched_domains_numa_distance[i])
@@ -6649,9 +6646,6 @@ static void sched_domains_numa_masks_clear(unsigned int cpu)
 {
         int i, j;
 
-        if (!sched_smp_initialized)
-                return;
-
         for (i = 0; i < sched_domains_numa_levels; i++) {
                 for (j = 0; j < nr_node_ids; j++)
                         cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
@@ -7051,12 +7045,9 @@ static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
  * If we come here as part of a suspend/resume, don't touch cpusets because we
  * want to restore it back to its original state upon resume anyway.
  */
-static void cpuset_cpu_active(bool frozen)
+static void cpuset_cpu_active(void)
 {
-        if (!sched_smp_initialized)
-                return;
-
-        if (frozen) {
+        if (cpuhp_tasks_frozen) {
                 /*
                  * num_cpus_frozen tracks how many CPUs are involved in suspend
                  * resume sequence. As long as this is not the last online
@@ -7077,17 +7068,14 @@ static void cpuset_cpu_active(bool frozen)
         cpuset_update_active_cpus(true);
 }
 
-static int cpuset_cpu_inactive(unsigned int cpu, bool frozen)
+static int cpuset_cpu_inactive(unsigned int cpu)
 {
         unsigned long flags;
         struct dl_bw *dl_b;
         bool overflow;
         int cpus;
 
-        if (!sched_smp_initialized)
-                return 0;
-
-        if (!frozen) {
+        if (!cpuhp_tasks_frozen) {
                 rcu_read_lock_sched();
                 dl_b = dl_bw_of(cpu);
 
@@ -7108,42 +7096,33 @@ static int cpuset_cpu_inactive(unsigned int cpu, bool frozen)
         return 0;
 }
 
-static int sched_cpu_active(struct notifier_block *nfb, unsigned long action,
-                            void *hcpu)
+int sched_cpu_activate(unsigned int cpu)
 {
-        unsigned int cpu = (unsigned long)hcpu;
+        set_cpu_active(cpu, true);
 
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_DOWN_FAILED:
-        case CPU_ONLINE:
-                set_cpu_active(cpu, true);
+        if (sched_smp_initialized) {
                 sched_domains_numa_masks_set(cpu);
-                cpuset_cpu_active(action & CPU_TASKS_FROZEN);
-                return NOTIFY_OK;
-        default:
-                return NOTIFY_DONE;
+                cpuset_cpu_active();
         }
+        return 0;
 }
 
-static int sched_cpu_inactive(struct notifier_block *nfb,
-                              unsigned long action, void *hcpu)
+int sched_cpu_deactivate(unsigned int cpu)
 {
-        unsigned int cpu = (unsigned long)hcpu;
         int ret;
 
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_DOWN_PREPARE:
-                set_cpu_active(cpu, false);
-                ret = cpuset_cpu_inactive(cpu, action & CPU_TASKS_FROZEN);
-                if (ret) {
-                        set_cpu_active(cpu, true);
-                        return notifier_from_errno(ret);
-                }
-                sched_domains_numa_masks_clear(cpu);
-                return NOTIFY_OK;
-        default:
-                return NOTIFY_DONE;
+        set_cpu_active(cpu, false);
+
+        if (!sched_smp_initialized)
+                return 0;
+
+        ret = cpuset_cpu_inactive(cpu);
+        if (ret) {
+                set_cpu_active(cpu, true);
+                return ret;
         }
+        sched_domains_numa_masks_clear(cpu);
+        return 0;
 }
 
 int sched_cpu_starting(unsigned int cpu)
@@ -7197,10 +7176,6 @@ static int __init migration_init(void)
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
 
-        /* Register cpu active notifiers */
-        cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-        cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
         return 0;
 }
 early_initcall(migration_init);
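
One behavioural detail worth noting: notifier-era callbacks learned about suspend/resume from the CPU_TASKS_FROZEN bit in the action argument, while state machine callbacks only receive the CPU number, so cpuset_cpu_active()/cpuset_cpu_inactive() above now consult the global cpuhp_tasks_frozen (defined in kernel/cpu.c) instead. A hedged sketch of that pattern for a hypothetical teardown callback; the foo_* name is illustrative only:

/* Hypothetical teardown callback, illustration only. */
static int foo_cpu_deactivate(unsigned int cpu)
{
        /*
         * The state machine provides no FROZEN flag; suspend/resume is
         * detected via the global cpuhp_tasks_frozen, exactly as the
         * cpuset helpers above do.
         */
        if (cpuhp_tasks_frozen)
                return 0;       /* part of suspend: keep user-visible state */

        /* ... normal hot-unplug work would go here ... */
        return 0;
}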
