Merge branch 'smp/hotplug' into sched/core, to resolve conflicts
Conflicts:
	kernel/sched/core.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed May 12, 2016
2 parents eb60b3e + e5ef27d commit 4eb8676
Showing 10 changed files with 210 additions and 279 deletions.
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/smp.c
@@ -565,7 +565,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 		smp_ops->give_timebase();

 	/* Wait until cpu puts itself in the online & active maps */
-	while (!cpu_online(cpu) || !cpu_active(cpu))
+	while (!cpu_online(cpu))
 		cpu_relax();

 	return 0;
2 changes: 1 addition & 1 deletion arch/s390/kernel/smp.c
@@ -832,7 +832,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	pcpu_attach_task(pcpu, tidle);
 	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
 	/* Wait until cpu puts itself in the online & active maps */
-	while (!cpu_online(cpu) || !cpu_active(cpu))
+	while (!cpu_online(cpu))
 		cpu_relax();
 	return 0;
 }
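A note on the two __cpu_up() hunks above: set_cpu_online() no longer sets the active bit (see the include/linux/cpumask.h hunk below), so cpu_active only becomes true once the new CPUHP_AP_ACTIVE state runs, the last step of the hotplug sequence per the cpuhp_ap_states hunk in kernel/cpu.c below, long after __cpu_up() has returned. Spinning on cpu_active() here could therefore no longer make progress. The following userspace sketch is illustrative only, not kernel code; the thread, flag and function names are invented. It models the ordering: the bring-up wait may only depend on the flag the secondary sets itself, while "active" is flipped by a later stage.

/* Toy model of the split: a "boot" side starts a "secondary" and waits only
 * for the flag the secondary raises itself (online); a separate, later stage
 * marks it active. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool online, active;

static void *secondary(void *arg)
{
	/* ...self-initialisation of the new CPU would happen here... */
	atomic_store(&online, true);	/* analogue of set_cpu_online() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);

	/* Analogue of the new __cpu_up() wait: only the online flag. */
	while (!atomic_load(&online))
		;			/* stand-in for cpu_relax() */
	printf("bring-up done: online=%d active=%d\n",
	       atomic_load(&online), atomic_load(&active));

	/* A later stage (CPUHP_AP_ACTIVE in the real code) marks the CPU
	 * active. Waiting for this flag inside the loop above would never
	 * terminate, because this code only runs after that loop. */
	atomic_store(&active, true);
	printf("activation done: online=%d active=%d\n",
	       atomic_load(&online), atomic_load(&active));

	pthread_join(t, NULL);
	return 0;
}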
18 changes: 0 additions & 18 deletions include/linux/cpu.h
@@ -59,25 +59,7 @@ struct notifier_block;
  * CPU notifier priorities.
  */
 enum {
-	/*
-	 * SCHED_ACTIVE marks a cpu which is coming up active during
-	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
-	 * notifier. CPUSET_ACTIVE adjusts cpuset according to
-	 * cpu_active mask right after SCHED_ACTIVE. During
-	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
-	 * ordered in the similar way.
-	 *
-	 * This ordering guarantees consistent cpu_active mask and
-	 * migration behavior to all cpu notifiers.
-	 */
-	CPU_PRI_SCHED_ACTIVE = INT_MAX,
-	CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
-	CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
-	CPU_PRI_CPUSET_INACTIVE = INT_MIN,
-
-	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF = 20,
-	CPU_PRI_MIGRATION = 10,

 	/* bring up workqueues before normal notifiers and down after */
 	CPU_PRI_WORKQUEUE_UP = 5,
2 changes: 2 additions & 0 deletions include/linux/cpuhotplug.h
@@ -8,6 +8,7 @@ enum cpuhp_state {
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
 	CPUHP_AP_OFFLINE,
+	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
@@ -16,6 +17,7 @@ enum cpuhp_state {
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+	CPUHP_AP_ACTIVE,
 	CPUHP_ONLINE,
 };
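What matters in this hunk is where the two new constants sit: the hotplug core walks this enum upward on bring-up and downward on tear-down, so CPUHP_AP_SCHED_STARTING runs early on the incoming CPU (with interrupts still disabled) while CPUHP_AP_ACTIVE is the last step before CPUHP_ONLINE and, symmetrically, the first step of tear-down. A minimal, self-contained userspace sketch of that convention follows; everything in it (the toy_* names and the callbacks) is invented for illustration and is not the kernel's table.

#include <stdio.h>

/* Toy model of an ordered hotplug state table: each state may provide a
 * startup and/or teardown callback; bring-up walks the table forward,
 * tear-down walks it backward. */
enum toy_state { TOY_OFFLINE, TOY_STARTING, TOY_NOTIFY, TOY_ACTIVE, TOY_ONLINE, TOY_NR };

struct toy_step {
	const char *name;
	int (*startup)(unsigned int cpu);
	int (*teardown)(unsigned int cpu);
};

static int say(const char *what, unsigned int cpu)
{
	printf("cpu%u: %s\n", cpu, what);
	return 0;
}

static int sched_starting(unsigned int cpu)   { return say("sched:starting", cpu); }
static int notify_online(unsigned int cpu)    { return say("notifiers: online", cpu); }
static int notify_down(unsigned int cpu)      { return say("notifiers: going down", cpu); }
static int sched_activate(unsigned int cpu)   { return say("sched:active (cpu_active set)", cpu); }
static int sched_deactivate(unsigned int cpu) { return say("sched:active teardown (cpu_active cleared)", cpu); }

static const struct toy_step steps[TOY_NR] = {
	[TOY_STARTING] = { "sched:starting", sched_starting, NULL },
	[TOY_NOTIFY]   = { "notifiers",      notify_online,  notify_down },
	[TOY_ACTIVE]   = { "sched:active",   sched_activate, sched_deactivate },
};

static void bringup(unsigned int cpu)
{
	for (int st = TOY_OFFLINE; st < TOY_NR; st++)
		if (steps[st].startup)
			steps[st].startup(cpu);
}

static void teardown(unsigned int cpu)
{
	for (int st = TOY_NR - 1; st >= TOY_OFFLINE; st--)
		if (steps[st].teardown)
			steps[st].teardown(cpu);
}

int main(void)
{
	bringup(1);	/* starting -> notifiers -> active: activation is last */
	teardown(1);	/* deactivation is first, then the notifiers */
	return 0;
}

In the real table (the kernel/cpu.c hunk further down) the corresponding slots are sched_cpu_starting/sched_cpu_dying for the early state and sched_cpu_activate/sched_cpu_deactivate for the late one.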
6 changes: 2 additions & 4 deletions include/linux/cpumask.h
@@ -743,12 +743,10 @@ set_cpu_present(unsigned int cpu, bool present)
 static inline void
 set_cpu_online(unsigned int cpu, bool online)
 {
-	if (online) {
+	if (online)
 		cpumask_set_cpu(cpu, &__cpu_online_mask);
-		cpumask_set_cpu(cpu, &__cpu_active_mask);
-	} else {
+	else
 		cpumask_clear_cpu(cpu, &__cpu_online_mask);
-	}
 }

 static inline void
9 changes: 9 additions & 0 deletions include/linux/sched.h
@@ -374,6 +374,15 @@ extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
+extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_dying(unsigned int cpu);
+#else
+# define sched_cpu_dying NULL
+#endif

 extern void sched_show_task(struct task_struct *p);
32 changes: 15 additions & 17 deletions kernel/cpu.c
@@ -703,21 +703,6 @@ static int takedown_cpu(unsigned int cpu)
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int err;

-	/*
-	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-	 * and RCU users of this state to go away such that all new such users
-	 * will observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
-
 	/* Park the smpboot threads */
 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 	smpboot_park_threads(cpu);
@@ -923,8 +908,6 @@ void cpuhp_online_idle(enum cpuhp_state state)

 	st->state = CPUHP_AP_ONLINE_IDLE;

-	/* The cpu is marked online, set it active now */
-	set_cpu_active(cpu, true);
 	/* Unpark the stopper thread and the hotplug thread of this cpu */
 	stop_machine_unpark(cpu);
 	kthread_unpark(st->thread);
@@ -1236,6 +1219,12 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.name = "ap:offline",
 		.cant_stop = true,
 	},
+	/* First state is scheduler control. Interrupts are disabled */
+	[CPUHP_AP_SCHED_STARTING] = {
+		.name = "sched:starting",
+		.startup = sched_cpu_starting,
+		.teardown = sched_cpu_dying,
+	},
 	/*
 	 * Low level startup/teardown notifiers. Run with interrupts
 	 * disabled. Will be removed once the notifiers are converted to
@@ -1274,6 +1263,15 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	 * The dynamically registered state space is here
 	 */

+#ifdef CONFIG_SMP
+	/* Last state is scheduler control setting the cpu active */
+	[CPUHP_AP_ACTIVE] = {
+		.name = "sched:active",
+		.startup = sched_cpu_activate,
+		.teardown = sched_cpu_deactivate,
+	},
+#endif
+
 	/* CPU is fully up and running. */
 	[CPUHP_ONLINE] = {
 		.name = "online",
(Diffs for the remaining 3 of the 10 changed files did not load and are not shown.)
