
Commit b54c867
---
r: 297404
b: refs/heads/master
c: 90e2401
h: refs/heads/master
v: v3
Richard Weinberger authored and Ingo Molnar committed Mar 26, 2012
1 parent 9461999 commit b54c867
Showing 5 changed files with 116 additions and 166 deletions.
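In short: the diff below merges the previously separate 32-bit (process_32.c) and 64-bit (process_64.c) cpu_idle() implementations, together with the x86-64 idle-notifier machinery, into the shared arch/x86/kernel/process.c, and adds a 32-bit stub for __exit_idle() so the common code builds on both configurations.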
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f5243d6de7ae232e1d81e44ae9756bbd8c988fcd
+refs/heads/master: 90e240142bd31ff10aeda5a280a53153f4eff004
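(The [refs] file is this mirror repository's record of the upstream branch head: the removed line is the previous head and the added line the new one, matching the abbreviated c: field in the commit message above.)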
1 change: 1 addition & 0 deletions trunk/arch/x86/include/asm/idle.h
@@ -14,6 +14,7 @@ void exit_idle(void);
 #else /* !CONFIG_X86_64 */
 static inline void enter_idle(void) { }
 static inline void exit_idle(void) { }
+static inline void __exit_idle(void) { }
 #endif /* CONFIG_X86_64 */
 
 void amd_e400_remove_cpu(int cpu);
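The new stub is needed because the merged cpu_idle() in process.c (below) calls __exit_idle() on every pass through the idle loop; enter_idle() and exit_idle() already had 32-bit no-op stubs, and this addition gives __exit_idle() one too, since the real implementations are only built under CONFIG_X86_64.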
114 changes: 114 additions & 0 deletions trunk/arch/x86/kernel/process.c
@@ -12,6 +12,9 @@
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
@@ -23,6 +26,24 @@
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);
@@ -371,6 +392,99 @@ static inline int hlt_use_halt(void)
}
#endif

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
percpu_write(is_idle, 1);
atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
return;
atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
/* idle loop has pid 0 */
if (current->pid)
return;
__exit_idle();
}
#endif

/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
/*
* If we're the non-boot CPU, nothing set the stack canary up
* for us. CPU0 already has it initialized but no harm in
* doing it again. This is a good place for updating it, as
* we wont ever return from this function (so the invalid
* canaries already on the stack wont ever trigger).
*/
boot_init_stack_canary();
current_thread_info()->status |= TS_POLLING;

while (1) {
tick_nohz_idle_enter();

while (!need_resched()) {
rmb();

if (cpu_is_offline(smp_processor_id()))
play_dead();

/*
* Idle routines should keep interrupts disabled
* from here on, until they go to idle.
* Otherwise, idle callbacks can misfire.
*/
local_touch_nmi();
local_irq_disable();

enter_idle();

/* Don't trace irqs off for idle */
stop_critical_timings();

/* enter_idle() needs rcu for notifiers */
rcu_idle_enter();

if (cpuidle_idle_call())
pm_idle();

rcu_idle_exit();
start_critical_timings();

/* In many cases the interrupt that ended idle
has already called exit_idle. But some idle
loops can be woken up without interrupt. */
__exit_idle();
}

tick_nohz_idle_exit();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}

/*
* We use this if we don't have any better
* idle routine..
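For context, a minimal illustrative sketch of a client of the idle-notifier API that this commit relocates into process.c. Everything named example_* is hypothetical; only idle_notifier_register()/idle_notifier_unregister() and the IDLE_START/IDLE_END actions (declared in asm/idle.h) come from the code above, and the chain only fires on x86-64:

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/idle.h>

/* Runs on the idling CPU via atomic_notifier_call_chain(), with
 * interrupts disabled at IDLE_START, so it must not sleep. */
static int example_idle_notify(struct notifier_block *nb,
                               unsigned long action, void *unused)
{
        switch (action) {
        case IDLE_START:
                /* CPU is about to enter its idle routine. */
                break;
        case IDLE_END:
                /* CPU has come back out of idle. */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
        .notifier_call = example_idle_notify,
};

static int __init example_init(void)
{
        idle_notifier_register(&example_idle_nb);
        return 0;
}

static void __exit example_exit(void)
{
        idle_notifier_unregister(&example_idle_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because the chain is atomic and enter_idle() runs after local_irq_disable(), the callback executes on the idling CPU with interrupts off and must not sleep.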
58 changes: 0 additions & 58 deletions trunk/arch/x86/kernel/process_32.c
@@ -9,7 +9,6 @@
* This file handles the architecture-dependent parts of process handling..
*/

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -31,14 +30,12 @@
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
@@ -58,7 +55,6 @@
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

@@ -70,60 +66,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return ((unsigned long *)tsk->thread.sp)[3];
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
BUG();
}
#endif

/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
int cpu = smp_processor_id();

/*
* If we're the non-boot CPU, nothing set the stack canary up
* for us. CPU0 already has it initialized but no harm in
* doing it again. This is a good place for updating it, as
* we wont ever return from this function (so the invalid
* canaries already on the stack wont ever trigger).
*/
boot_init_stack_canary();

current_thread_info()->status |= TS_POLLING;

/* endless idle loop with no priority at all */
while (1) {
tick_nohz_idle_enter();
rcu_idle_enter();
while (!need_resched()) {

check_pgt_cache();
rmb();

if (cpu_is_offline(cpu))
play_dead();

local_touch_nmi();
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
if (cpuidle_idle_call())
pm_idle();
start_critical_timings();
}
rcu_idle_exit();
tick_nohz_idle_exit();
schedule_preempt_disabled();
}
}

void __show_regs(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
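Relative to the removed 32-bit loop above, the merged version keeps the 64-bit structure: the per-iteration check_pgt_cache() call is gone, rcu_idle_enter()/rcu_idle_exit() now bracket only the cpuidle/pm_idle() call rather than the whole inner loop (the enter_idle() notifiers still need RCU, per the comment in the merged code), and the schedule_preempt_disabled() call is open-coded as preempt_enable_no_resched()/schedule()/preempt_disable().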
107 changes: 0 additions & 107 deletions trunk/arch/x86/kernel/process_64.c
@@ -14,7 +14,6 @@
* This file handles the architecture-dependent parts of process handling..
*/

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -32,12 +31,10 @@
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
@@ -52,114 +49,10 @@
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void enter_idle(void)
{
percpu_write(is_idle, 1);
atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
return;
atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
/* idle loop has pid 0 */
if (current->pid)
return;
__exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
BUG();
}
#endif

/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
current_thread_info()->status |= TS_POLLING;

/*
* If we're the non-boot CPU, nothing set the stack canary up
* for us. CPU0 already has it initialized but no harm in
* doing it again. This is a good place for updating it, as
* we wont ever return from this function (so the invalid
* canaries already on the stack wont ever trigger).
*/
boot_init_stack_canary();

/* endless idle loop with no priority at all */
while (1) {
tick_nohz_idle_enter();
while (!need_resched()) {

rmb();

if (cpu_is_offline(smp_processor_id()))
play_dead();
/*
* Idle routines should keep interrupts disabled
* from here on, until they go to idle.
* Otherwise, idle callbacks can misfire.
*/
local_touch_nmi();
local_irq_disable();
enter_idle();
/* Don't trace irqs off for idle */
stop_critical_timings();

/* enter_idle() needs rcu for notifiers */
rcu_idle_enter();

if (cpuidle_idle_call())
pm_idle();

rcu_idle_exit();
start_critical_timings();

/* In many cases the interrupt that ended idle
has already called exit_idle. But some idle
loops can be woken up without interrupt. */
__exit_idle();
}

tick_nohz_idle_exit();
schedule_preempt_disabled();
}
}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
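The exit_idle() half of the protocol (removed here, now in process.c) is driven from interrupt entry rather than from the idle loop itself. A hedged sketch of that call pattern, loosely modeled on the x86 interrupt paths of this era — the function name is hypothetical and the dispatch details are elided:

#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <asm/idle.h>

/* Interrupt entry ends the idle period explicitly, so IDLE_END
 * notifiers run as soon as the CPU is woken; exit_idle() is a
 * no-op unless current is the pid-0 idle task. */
unsigned int example_do_irq(struct pt_regs *regs)
{
        irq_enter();            /* enter hard-irq context */
        exit_idle();            /* fire IDLE_END if we interrupted idle */

        /* ... dispatch to the handler for this vector ... */

        irq_exit();
        return 1;
}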
