
Commit

---
yaml
---
r: 351108
b: refs/heads/master
c: 6a377dd
h: refs/heads/master
v: v3
Len Brown committed Feb 10, 2013
1 parent d774bf7 commit e848840
Showing 32 changed files with 425 additions and 350 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 2e7d0f60d8b976c951621c1bc82acf0654089c0b
refs/heads/master: 6a377ddc4e4ede2eeb9cd46ada23bbe417704fc9
13 changes: 9 additions & 4 deletions trunk/arch/arm/kernel/process.c
@@ -172,9 +172,14 @@ static void default_idle(void)
local_irq_enable();
}

void (*pm_idle)(void) = default_idle;
EXPORT_SYMBOL(pm_idle);

/*
* The idle thread.
* We always respect 'hlt_counter' to prevent low power idle.
* The idle thread, has rather strange semantics for calling pm_idle,
* but this is what x86 does and we need to do the same, so that
* things like cpuidle get called in the same way. The only difference
* is that we always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -205,10 +210,10 @@ void cpu_idle(void)
} else if (!need_resched()) {
stop_critical_timings();
if (cpuidle_idle_call())
default_idle();
pm_idle();
start_critical_timings();
/*
* default_idle functions must always
* pm_idle functions must always
* return with IRQs enabled.
*/
WARN_ON(irqs_disabled());
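
The two hunks above (and the matching ones for arm64, blackfin, cris, ia64, m32r, microblaze and mn10300 further down) add the same indirection: an exported pm_idle function pointer that the architecture's idle loop prefers over its built-in default_idle. A minimal user-space sketch of that pattern (plain C with illustrative names, not kernel code):

#include <stdio.h>

/* Stand-in for the architecture's built-in idle routine. */
static void default_idle(void)
{
	puts("default_idle: wait for interrupt");
}

/* Overridable power-management hook; NULL means "use the default". */
static void (*pm_idle)(void) = NULL;

/* One pass of the idle loop body shown in the hunks above. */
static void idle_once(void)
{
	void (*idle)(void) = pm_idle;	/* sample the pointer once */

	if (!idle)
		idle = default_idle;
	idle();
}

/* A platform-specific replacement a driver might install. */
static void platform_idle(void)
{
	puts("platform_idle: enter a deeper low-power state");
}

int main(void)
{
	idle_once();			/* falls back to default_idle */
	pm_idle = platform_idle;	/* a driver installs its own hook */
	idle_once();			/* now dispatches to platform_idle */
	return 0;
}

Sampling the pointer into a local before the NULL check mirrors what several of the hunks do, so a concurrent update cannot change the value between the test and the call.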
84 changes: 59 additions & 25 deletions trunk/arch/arm/mach-davinci/cpuidle.c
@@ -25,44 +25,35 @@

#define DAVINCI_CPUIDLE_MAX_STATES 2

static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
static void __iomem *ddr2_reg_base;
static bool ddr2_pdown;

static void davinci_save_ddr_power(int enter, bool pdown)
{
u32 val;

val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);

if (enter) {
if (pdown)
val |= DDR2_SRPD_BIT;
else
val &= ~DDR2_SRPD_BIT;
val |= DDR2_LPMODEN_BIT;
} else {
val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
}

__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
}
struct davinci_ops {
void (*enter) (u32 flags);
void (*exit) (u32 flags);
u32 flags;
};

/* Actual code that puts the SoC in different idle states */
static int davinci_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
davinci_save_ddr_power(1, ddr2_pdown);
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct davinci_ops *ops = cpuidle_get_statedata(state_usage);

if (ops && ops->enter)
ops->enter(ops->flags);

index = cpuidle_wrap_enter(dev, drv, index,
arm_cpuidle_simple_enter);

davinci_save_ddr_power(0, ddr2_pdown);
if (ops && ops->exit)
ops->exit(ops->flags);

return index;
}

/* fields in davinci_ops.flags */
#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)

static struct cpuidle_driver davinci_idle_driver = {
.name = "cpuidle-davinci",
.owner = THIS_MODULE,
@@ -79,6 +70,45 @@ static struct cpuidle_driver davinci_idle_driver = {
.state_count = DAVINCI_CPUIDLE_MAX_STATES,
};

static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
static void __iomem *ddr2_reg_base;

static void davinci_save_ddr_power(int enter, bool pdown)
{
u32 val;

val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET);

if (enter) {
if (pdown)
val |= DDR2_SRPD_BIT;
else
val &= ~DDR2_SRPD_BIT;
val |= DDR2_LPMODEN_BIT;
} else {
val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT);
}

__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET);
}

static void davinci_c2state_enter(u32 flags)
{
davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
}

static void davinci_c2state_exit(u32 flags)
{
davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN));
}

static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
[1] = {
.enter = davinci_c2state_enter,
.exit = davinci_c2state_exit,
},
};

static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{
int ret;
@@ -94,7 +124,11 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)

ddr2_reg_base = pdata->ddr2_ctlr_base;

ddr2_pdown = pdata->ddr2_pdown;
if (pdata->ddr2_pdown)
davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);

device->state_count = DAVINCI_CPUIDLE_MAX_STATES;

ret = cpuidle_register_driver(&davinci_idle_driver);
if (ret) {
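
The davinci diff replaces the hard-coded DDR2 power-down calls with a per-state ops table: a struct davinci_ops holding enter/exit callbacks and a flags word is attached to state 1 through cpuidle_set_statedata(), and davinci_enter_idle() retrieves it with cpuidle_get_statedata() around the generic idle call. A self-contained sketch of the same shape (ordinary C, hypothetical names, no kernel APIs):

#include <stdio.h>

#define FLAG_DDR2_PWDN (1u << 0)

struct state_ops {
	void (*enter)(unsigned int flags);
	void (*exit)(unsigned int flags);
	unsigned int flags;
};

static void c2_enter(unsigned int flags)
{
	printf("enter C2, DDR2 self-refresh power-down %s\n",
	       (flags & FLAG_DDR2_PWDN) ? "enabled" : "disabled");
}

static void c2_exit(unsigned int flags)
{
	(void)flags;
	puts("exit C2, DDR2 back to normal operation");
}

/* One slot per idle state; state 0 needs no extra work, so it stays empty. */
static struct state_ops states[2] = {
	[1] = { .enter = c2_enter, .exit = c2_exit },
};

static int enter_idle(int index)
{
	struct state_ops *ops = &states[index];

	if (ops->enter)
		ops->enter(ops->flags);
	/* ... the low-level wait-for-interrupt call would sit here ... */
	if (ops->exit)
		ops->exit(ops->flags);
	return index;
}

int main(void)
{
	states[1].flags |= FLAG_DDR2_PWDN;	/* platform data allows power-down */
	enter_idle(1);
	return 0;
}

Keeping the DDR2 decision in a flags word means the probe code only has to set one bit when the platform data allows power-down, which is what the hunk does with DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN.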
13 changes: 9 additions & 4 deletions trunk/arch/arm64/kernel/process.c
@@ -97,9 +97,14 @@ static void default_idle(void)
local_irq_enable();
}

void (*pm_idle)(void) = default_idle;
EXPORT_SYMBOL_GPL(pm_idle);

/*
* The idle thread.
* We always respect 'hlt_counter' to prevent low power idle.
* The idle thread, has rather strange semantics for calling pm_idle,
* but this is what x86 does and we need to do the same, so that
* things like cpuidle get called in the same way. The only difference
* is that we always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -117,10 +122,10 @@ void cpu_idle(void)
local_irq_disable();
if (!need_resched()) {
stop_critical_timings();
default_idle();
pm_idle();
start_critical_timings();
/*
* default_idle functions should always return
* pm_idle functions should always return
* with IRQs enabled.
*/
WARN_ON(irqs_disabled());
7 changes: 7 additions & 0 deletions trunk/arch/blackfin/kernel/process.c
@@ -39,6 +39,12 @@ int nr_l1stack_tasks;
void *l1_stack_base;
unsigned long l1_stack_len;

/*
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

@@ -75,6 +81,7 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
11 changes: 10 additions & 1 deletion trunk/arch/cris/kernel/process.c
@@ -54,6 +54,11 @@ void enable_hlt(void)

EXPORT_SYMBOL(enable_hlt);

/*
* The following aren't currently used.
*/
void (*pm_idle)(void);

extern void default_idle(void);

void (*pm_power_off)(void);
@@ -72,12 +77,16 @@ void cpu_idle (void)
while (1) {
rcu_idle_enter();
while (!need_resched()) {
void (*idle)(void);
/*
* Mark this as an RCU critical section so that
* synchronize_kernel() in the unload path waits
* for our completion.
*/
default_idle();
idle = pm_idle;
if (!idle)
idle = default_idle;
idle();
}
rcu_idle_exit();
schedule_preempt_disabled();
3 changes: 3 additions & 0 deletions trunk/arch/ia64/kernel/process.c
@@ -57,6 +57,8 @@ void (*ia64_mark_idle)(int);

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_idle) (void);
EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);

@@ -299,6 +301,7 @@ cpu_idle (void)
if (mark_idle)
(*mark_idle)(1);

idle = pm_idle;
if (!idle)
idle = default_idle;
(*idle)();
1 change: 1 addition & 0 deletions trunk/arch/ia64/kernel/setup.c
@@ -1051,6 +1051,7 @@ cpu_init (void)
max_num_phys_stacked = num_phys_stacked;
}
platform_cpu_init();
pm_idle = default_idle;
}

void __init
51 changes: 49 additions & 2 deletions trunk/arch/m32r/kernel/process.c
@@ -44,9 +44,35 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return tsk->thread.lr;
}

/*
* Powermanagement idle function, if any..
*/
static void (*pm_idle)(void) = NULL;

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
* We use this is we don't have any better
* idle routine..
*/
static void default_idle(void)
{
/* M32R_FIXME: Please use "cpu_sleep" mode. */
cpu_relax();
}

/*
* On SMP it's slightly faster (but much more power-consuming!)
* to poll the ->work.need_resched flag instead of waiting for the
* cross-CPU IPI to arrive. Use this option with caution.
*/
static void poll_idle (void)
{
/* M32R_FIXME */
cpu_relax();
}

/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
@@ -58,8 +84,14 @@ void cpu_idle (void)
/* endless idle loop with no priority at all */
while (1) {
rcu_idle_enter();
while (!need_resched())
cpu_relax();
while (!need_resched()) {
void (*idle)(void) = pm_idle;

if (!idle)
idle = default_idle;

idle();
}
rcu_idle_exit();
schedule_preempt_disabled();
}
@@ -88,6 +120,21 @@ void machine_power_off(void)
/* M32R_FIXME */
}

static int __init idle_setup (char *str)
{
if (!strncmp(str, "poll", 4)) {
printk("using poll in idle threads.\n");
pm_idle = poll_idle;
} else if (!strncmp(str, "sleep", 4)) {
printk("using sleep in idle threads.\n");
pm_idle = default_idle;
}

return 1;
}

__setup("idle=", idle_setup);

void show_regs(struct pt_regs * regs)
{
printk("\n");
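
The m32r hunk also adds an idle= boot parameter so the hook can be chosen from the command line; the selection is a plain prefix match. A self-contained sketch of the same idea (ordinary C, called directly rather than registered through __setup()):

#include <stdio.h>
#include <string.h>

static void default_idle(void) { puts("sleep-style idle"); }
static void poll_idle(void)    { puts("poll-style idle"); }

static void (*pm_idle)(void);

/* Mirrors the prefix matching done by idle_setup() in the hunk above. */
static int idle_setup(const char *str)
{
	if (!strncmp(str, "poll", 4))
		pm_idle = poll_idle;
	else if (!strncmp(str, "sleep", 5))
		pm_idle = default_idle;
	return 1;
}

int main(void)
{
	idle_setup("poll");
	if (pm_idle)
		pm_idle();	/* prints "poll-style idle" */
	return 0;
}

In the kernel, __setup("idle=", idle_setup) registers the parser with the early boot-option machinery; the sketch simply calls it with a sample string.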
3 changes: 3 additions & 0 deletions trunk/arch/microblaze/kernel/process.c
@@ -41,6 +41,7 @@ void show_regs(struct pt_regs *regs)
regs->msr, regs->ear, regs->esr, regs->fsr);
}

void (*pm_idle)(void);
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

@@ -97,6 +98,8 @@ void cpu_idle(void)

/* endless idle loop with no priority at all */
while (1) {
void (*idle)(void) = pm_idle;

if (!idle)
idle = default_idle;

7 changes: 7 additions & 0 deletions trunk/arch/mn10300/kernel/process.c
@@ -36,6 +36,12 @@
#include <asm/gdb-stub.h>
#include "internal.h"

/*
* power management idle function, if any..
*/
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

/*
* return saved PC of a blocked thread.
*/
@@ -107,6 +113,7 @@ void cpu_idle(void)
void (*idle)(void);

smp_rmb();
idle = pm_idle;
if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
idle = poll_idle;
5 changes: 5 additions & 0 deletions trunk/arch/openrisc/kernel/idle.c
@@ -39,6 +39,11 @@

void (*powersave) (void) = NULL;

static inline void pm_idle(void)
{
barrier();
}

void cpu_idle(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);