kernel/sched/idle.c
/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}
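/*
 * Illustrative usage sketch (an annotation for this page, not part of
 * the upstream file): a driver that needs very low wakeup latency for
 * a short window can force idle CPUs into polling mode by bracketing
 * the window with the control calls above:
 *
 *	cpu_idle_poll_ctrl(true);	// idle CPUs now spin in cpu_idle_poll()
 *	// ... latency-critical work ...
 *	cpu_idle_poll_ctrl(false);	// restore normal cpuidle behaviour
 *
 * Because cpu_idle_force_poll is a counter rather than a flag, such
 * sections may nest; the WARN_ON_ONCE() above catches unbalanced calls.
 */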
/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	bool reflect;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the disabled irqs
	 * critical sections latencies
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev))
		goto use_default;

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze()) {
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state >= 0) {
			local_irq_enable();
			goto exit_idle;
		}

		reflect = false;
		next_state = cpuidle_find_deepest_state(drv, dev);
	} else {
		reflect = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
	}

	/* Fall back to the default arch idle method on errors. */
	if (next_state < 0)
		goto use_default;

	/*
	 * The idle task must be scheduled, it is pointless to
	 * go to idle, just update no idle residency and get
	 * out of this function
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}

	/* Take note of the planned idle state. */
	idle_set_state(this_rq(), &drv->states[next_state]);

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	/* The cpu is no longer idle or about to enter idle. */
	idle_set_state(this_rq(), NULL);

	if (entered_state == -EBUSY)
		goto use_default;

	/*
	 * Give the governor an opportunity to reflect on the outcome
	 */
	if (reflect)
		cpuidle_reflect(dev, entered_state);

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
	return;

use_default:
	/*
	 * We can't use the cpuidle framework, let's use the default
	 * idle routine.
	 */
	if (current_clr_polling_and_test())
		local_irq_enable();
	else
		arch_cpu_idle();

	goto exit_idle;
}

DEFINE_PER_CPU(bool, cpu_dead_idle);

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id())) {
				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
					       (void *)(long)smp_processor_id());
				smp_mb(); /* all activity before dead. */
				this_cpu_write(cpu_dead_idle, true);
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}
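/*
 * Sketch of the other side of the polling handshake (paraphrased from
 * the wakeup path in kernel/sched/core.c, not a verbatim quote): a
 * remote waker can avoid the reschedule IPI entirely when the target
 * CPU's idle task advertises TIF_POLLING_NRFLAG:
 *
 *	if (set_nr_if_polling(rq->idle))
 *		trace_sched_wake_idle_without_ipi(cpu);
 *	else
 *		smp_send_reschedule(cpu);
 *
 * Setting TIF_NEED_RESCHED is then enough to break the polling CPU out
 * of the inner while (!need_resched()) loop above, which is exactly
 * the invariant promised by the comment at the top of cpu_idle_loop().
 */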
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
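/*
 * Usage note (illustrative sketch, not part of the upstream file):
 * cpu_startup_entry() is the terminal call of every CPU's boot path
 * and never returns. The boot CPU reaches it from rest_init() in
 * init/main.c; architectures call it at the end of secondary-CPU
 * bringup, e.g. on x86 (simplified):
 *
 *	static void start_secondary(void *unused)
 *	{
 *		// ... per-CPU init ...
 *		cpu_startup_entry(CPUHP_ONLINE);
 *	}
 *
 * From that point on the CPU lives in cpu_idle_loop() until it is
 * offlined.
 */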