mariux64 / linux
linux / kernel / smpboot.c (at commit 0db0628)
313 lines (272 loc) · 6.89 KB
/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk)
                return ERR_PTR(-ENOMEM);
        init_idle(tsk, cpu);
        return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
        per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu: The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk))
                        pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
                else
                        per_cpu(idle_threads, cpu) = tsk;
        }
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
        unsigned int cpu, boot_cpu;

        boot_cpu = smp_processor_id();

        for_each_possible_cpu(cpu) {
                if (cpu != boot_cpu)
                        idle_init(cpu);
        }
}
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
        unsigned int cpu;
        unsigned int status;
        struct smp_hotplug_thread *ht;
};

enum {
        HP_THREAD_NONE = 0,
        HP_THREAD_ACTIVE,
        HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data: thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 1 when the thread should exit, 0 otherwise.
 */
static int smpboot_thread_fn(void *data)
{
        struct smpboot_thread_data *td = data;
        struct smp_hotplug_thread *ht = td->ht;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                preempt_disable();
                if (kthread_should_stop()) {
                        set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->cleanup)
                                ht->cleanup(td->cpu, cpu_online(td->cpu));
                        kfree(td);
                        return 0;
                }

                if (kthread_should_park()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->park && td->status == HP_THREAD_ACTIVE) {
                                BUG_ON(td->cpu != smp_processor_id());
                                ht->park(td->cpu);
                                td->status = HP_THREAD_PARKED;
                        }
                        kthread_parkme();
                        /* We might have been woken for stop */
                        continue;
                }

                BUG_ON(td->cpu != smp_processor_id());

                /* Check for state change setup */
                switch (td->status) {
                case HP_THREAD_NONE:
                        preempt_enable();
                        if (ht->setup)
                                ht->setup(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        preempt_disable();
                        break;
                case HP_THREAD_PARKED:
                        preempt_enable();
                        if (ht->unpark)
                                ht->unpark(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        preempt_disable();
                        break;
                }

                if (!ht->thread_should_run(td->cpu)) {
                        preempt_enable();
                        schedule();
                } else {
                        set_current_state(TASK_RUNNING);
                        preempt_enable();
                        ht->thread_fn(td->cpu);
                }
        }
}

static int __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
        struct smpboot_thread_data *td;

        if (tsk)
                return 0;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
        if (!td)
                return -ENOMEM;
        td->cpu = cpu;
        td->ht = ht;

        tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu, ht->thread_comm);
        if (IS_ERR(tsk)) {
                kfree(td);
                return PTR_ERR(tsk);
        }
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        if (ht->create) {
                /*
                 * Make sure that the task has actually scheduled out
                 * into park position, before calling the create
                 * callback. At least the migration thread callback
                 * requires that the task is off the runqueue.
                 */
                if (!wait_task_inactive(tsk, TASK_PARKED))
                        WARN_ON(1);
                else
                        ht->create(cpu);
        }
        return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list) {
                ret = __smpboot_create_thread(cur, cpu);
                if (ret)
                        break;
        }
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (ht->pre_unpark)
                ht->pre_unpark(cpu);
        kthread_unpark(tsk);
}

void smpboot_unpark_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list)
                smpboot_unpark_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (tsk && !ht->selfparking)
                kthread_park(tsk);
}

void smpboot_park_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry_reverse(cur, &hotplug_threads, list)
                smpboot_park_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
        unsigned int cpu;

        /* We need to destroy also the parked threads of offline cpus */
        for_each_possible_cpu(cpu) {
                struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

                if (tsk) {
                        kthread_stop(tsk);
                        put_task_struct(tsk);
                        *per_cpu_ptr(ht->store, cpu) = NULL;
                }
        }
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        unsigned int cpu;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
                if (ret) {
                        smpboot_destroy_threads(plug_thread);
                        goto out;
                }
                smpboot_unpark_thread(plug_thread, cpu);
        }
        list_add(&plug_thread->list, &hotplug_threads);
out:
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
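
The file above implements the generic per-cpu hotplug thread machinery; the registration entry points are smpboot_register_percpu_thread() and smpboot_unregister_percpu_thread(). As a rough illustration of how a client is expected to use them, here is a minimal sketch. It only relies on struct smp_hotplug_thread fields that smpboot.c itself dereferences (store, thread_should_run, thread_fn, thread_comm); the my_* names are hypothetical and do not exist in this tree.

/*
 * Illustrative client sketch, not part of kernel/smpboot.c.
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, my_percpu_task);

/* Polled by smpboot_thread_fn(); non-zero means thread_fn should run. */
static int my_thread_should_run(unsigned int cpu)
{
        return 0;       /* no per-cpu work pending in this sketch */
}

/* Invoked from smpboot_thread_fn() with preemption enabled. */
static void my_thread_fn(unsigned int cpu)
{
        /* process the pending per-cpu work here */
}

static struct smp_hotplug_thread my_threads = {
        .store             = &my_percpu_task,
        .thread_should_run = my_thread_should_run,
        .thread_fn         = my_thread_fn,
        .thread_comm       = "my_thread/%u",
};

static int __init my_threads_init(void)
{
        /* Creates one thread per online cpu and unparks it. */
        return smpboot_register_percpu_thread(&my_threads);
}

The optional setup, park, unpark and cleanup callbacks handled in smpboot_thread_fn() can be filled in the same initializer when the client needs to react to the park/unpark transitions driven by smpboot_park_threads() and smpboot_unpark_threads(); smpboot_unregister_percpu_thread() stops and releases the threads again.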