Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-idle-fix

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-idle-fix:
  fix idle (arch, acpi and apm) and lockdep
Linus Torvalds committed Apr 27, 2008
2 parents cf867ac + 7f424a8 commit f222eba
Showing 6 changed files with 137 additions and 244 deletions.
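Taken together, the hunks below change the idle calling convention: the arch idle loop now disables interrupts before invoking the pm_idle handler, and every handler is expected to re-enable them, either atomically while halting or explicitly before returning. A minimal kernel-context sketch of a handler that follows this contract (the name example_idle is illustrative, not part of the patch):

/* Hypothetical pm_idle-style handler: entered with IRQs off, returns with IRQs on. */
static void example_idle(void)
{
	if (need_resched()) {
		local_irq_enable();	/* work pending: re-enable and bail out */
		return;
	}
	safe_halt();			/* "sti; hlt": re-enables IRQs and halts without a race window */
}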
3 changes: 3 additions & 0 deletions arch/x86/kernel/apm_32.c
@@ -904,13 +904,16 @@ static void apm_cpu_idle(void)
 			original_pm_idle();
 		else
 			default_idle();
+		local_irq_disable();
 		jiffies_since_last_check = jiffies - last_jiffies;
 		if (jiffies_since_last_check > idle_period)
 			goto recalc;
 	}
 
 	if (apm_idle_done)
 		apm_do_busy();
 
+	local_irq_enable();
 }
 
 /**
117 changes: 117 additions & 0 deletions arch/x86/kernel/process.c
@@ -4,6 +4,8 @@
 #include <linux/smp.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/pm.h>
 
 struct kmem_cache *task_xstate_cachep;
 
@@ -42,3 +44,118 @@ void arch_task_cache_init(void)
 				  __alignof__(union thread_xstate),
 				  SLAB_PANIC, NULL);
 }
+
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 0, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+{
+	if (!need_resched()) {
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		smp_mb();
+		if (!need_resched())
+			__mwait(ax, cx);
+	}
+}
+
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+	if (!need_resched()) {
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		smp_mb();
+		if (!need_resched())
+			__sti_mwait(0, 0);
+		else
+			local_irq_enable();
+	} else
+		local_irq_enable();
+}
+
+
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+	if (force_mwait)
+		return 1;
+	/* Any C1 states supported? */
+	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+	local_irq_enable();
+	cpu_relax();
+}
+
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+{
+	static int selected;
+
+	if (selected)
+		return;
+#ifdef CONFIG_X86_SMP
+	if (pm_idle == poll_idle && smp_num_siblings > 1) {
+		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+			" performance may degrade.\n");
+	}
+#endif
+	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
+		/*
+		 * Skip, if setup has overridden idle.
+		 * One CPU supports mwait => All CPUs supports mwait
+		 */
+		if (!pm_idle) {
+			printk(KERN_INFO "using mwait in idle threads.\n");
+			pm_idle = mwait_idle;
+		}
+	}
+	selected = 1;
+}
+
+static int __init idle_setup(char *str)
+{
+	if (!strcmp(str, "poll")) {
+		printk("using polling idle threads.\n");
+		pm_idle = poll_idle;
+	} else if (!strcmp(str, "mwait"))
+		force_mwait = 1;
+	else
+		return -1;
+
+	boot_option_idle_override = 1;
+	return 0;
+}
+early_param("idle", idle_setup);

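The cpu_idle_wait() comment above spells out a two-step protocol for swapping the idle handler at run time. A hypothetical caller (the handler and install function names below are illustrative, not taken from this commit) would look roughly like this:

/* Sketch only: switching pm_idle on a kernel of this vintage. */
static void my_idle(void)		/* illustrative replacement handler */
{
	local_irq_enable();		/* honour the convention: leave with IRQs on */
	cpu_relax();
}

static void install_my_idle(void)
{
	pm_idle = my_idle;	/* step 1: publish the new handler */
	cpu_idle_wait();	/* step 2: kick every CPU out of the old one */
}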
118 changes: 4 additions & 114 deletions arch/x86/kernel/process_32.c
@@ -111,12 +111,10 @@ void default_idle(void)
 		 */
 		smp_mb();
 
-		local_irq_disable();
-		if (!need_resched()) {
+		if (!need_resched())
 			safe_halt();	/* enables interrupts racelessly */
-			local_irq_disable();
-		}
-		local_irq_enable();
+		else
+			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
 	} else {
 		local_irq_enable();
@@ -128,17 +126,6 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	local_irq_enable();
-	cpu_relax();
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <asm/nmi.h>
 /* We don't actually take CPU down, just spin without interrupts. */
@@ -196,6 +183,7 @@ void cpu_idle(void)
 			if (cpu_is_offline(cpu))
 				play_dead();
 
+			local_irq_disable();
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
 			idle();
 		}
@@ -206,104 +194,6 @@ void cpu_idle(void)
 	}
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-	smp_mb();
-	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__sti_mwait(ax, cx);
-		else
-			local_irq_enable();
-	} else
-		local_irq_enable();
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-	local_irq_enable();
-	mwait_idle_with_hints(0, 0);
-}
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-	if (force_mwait)
-		return 1;
-	/* Any C1 states supported? */
-	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
-	static int selected;
-
-	if (selected)
-		return;
-#ifdef CONFIG_X86_SMP
-	if (pm_idle == poll_idle && smp_num_siblings > 1) {
-		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-			" performance may degrade.\n");
-	}
-#endif
-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-		/*
-		 * Skip, if setup has overridden idle.
-		 * One CPU supports mwait => All CPUs supports mwait
-		 */
-		if (!pm_idle) {
-			printk(KERN_INFO "using mwait in idle threads.\n");
-			pm_idle = mwait_idle;
-		}
-	}
-	selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
-	if (!strcmp(str, "poll")) {
-		printk("using polling idle threads.\n");
-		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else
-		return -1;
-
-	boot_option_idle_override = 1;
-	return 0;
-}
-early_param("idle", idle_setup);
-
 void __show_registers(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

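For reference, the "enables interrupts racelessly" comment next to safe_halt() in the default_idle() hunk relies on the x86 STI interrupt shadow: interrupts only become deliverable after the instruction following STI, so nothing can slip in between re-enabling IRQs and executing HLT. A sketch of that idiom, shown purely for illustration of what safe_halt() boils down to:

/* Illustrative only: the "sti; hlt" idiom behind safe_halt(). */
static inline void sketch_safe_halt(void)
{
	/* Interrupts stay masked until HLT itself executes, so a wakeup
	 * arriving after the caller's need_resched() check is not lost:
	 * it is delivered at HLT and immediately wakes the CPU. */
	asm volatile("sti; hlt" : : : "memory");
}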