From ac1d96d626836219851a3b5786234cce1d6d4834 Mon Sep 17 00:00:00 2001
From: Len Brown
Date: Fri, 12 Aug 2011 00:13:47 -0400
Subject: [PATCH]

--- yaml ---
r: 275034
b: refs/heads/master
c: 22f4521d664030e417f41953e922f61c65f2e189
h: refs/heads/master
v: v3
---
 [refs]                                      |   2 +-
 trunk/arch/arm/mach-at91/cpuidle.c          |  41 ++--
 trunk/arch/arm/mach-davinci/cpuidle.c       |  51 ++--
 trunk/arch/arm/mach-exynos4/cpuidle.c       |  30 +--
 trunk/arch/arm/mach-kirkwood/cpuidle.c      |  42 ++--
 trunk/arch/arm/mach-omap2/cpuidle34xx.c     | 133 ++++-------
 trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c |  28 +--
 trunk/arch/x86/platform/mrst/pmu.c          |   2 +-
 trunk/drivers/acpi/processor_driver.c       |  20 +-
 trunk/drivers/acpi/processor_idle.c         | 251 ++++----------------
 trunk/drivers/cpuidle/cpuidle.c             |  86 ++++---
 trunk/drivers/cpuidle/driver.c              |  25 --
 trunk/drivers/cpuidle/governors/ladder.c    |  41 +---
 trunk/drivers/cpuidle/governors/menu.c      |  29 +--
 trunk/drivers/cpuidle/sysfs.c               |  22 +-
 trunk/drivers/idle/intel_idle.c             | 130 +++-------
 trunk/include/acpi/processor.h              |   1 -
 trunk/include/linux/cpuidle.h               |  52 ++--
 18 files changed, 334 insertions(+), 652 deletions(-)

diff --git a/[refs] b/[refs]
index 4e339296ddec..67ba8f79b9ed 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 46bcfad7a819bd17ac4e831b04405152d59784ab
+refs/heads/master: 22f4521d664030e417f41953e922f61c65f2e189
diff --git a/trunk/arch/arm/mach-at91/cpuidle.c b/trunk/arch/arm/mach-at91/cpuidle.c
index 93178f67420e..1cfeac1483d6 100644
--- a/trunk/arch/arm/mach-at91/cpuidle.c
+++ b/trunk/arch/arm/mach-at91/cpuidle.c
@@ -33,8 +33,7 @@ static struct cpuidle_driver at91_idle_driver = {
 
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
-			struct cpuidle_driver *drv,
-			int index)
+			       struct cpuidle_state *state)
 {
 	struct timeval before, after;
 	int idle_time;
@@ -42,10 +41,10 @@ static int at91_enter_idle(struct cpuidle_device *dev,
 
 	local_irq_disable();
 	do_gettimeofday(&before);
-	if (index == 0)
+	if (state == &dev->states[0])
 		/* Wait for interrupt state */
 		cpu_do_idle();
-	else if (index == 1) {
+	else if (state == &dev->states[1]) {
 		asm("b 1f; .align 5; 1:");
 		asm("mcr p15, 0, r0, c7, c10, 4");	/* drain write buffer */
 		saved_lpr = sdram_selfrefresh_enable();
@@ -56,38 +55,34 @@ static int at91_enter_idle(struct cpuidle_device *dev,
 	local_irq_enable();
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 			(after.tv_usec - before.tv_usec);
-
-	dev->last_residency = idle_time;
-	return index;
+	return idle_time;
 }
 
 /* Initialize CPU idle by registering the idle states */
 static int at91_init_cpuidle(void)
 {
 	struct cpuidle_device *device;
-	struct cpuidle_driver *driver = &at91_idle_driver;
+
+	cpuidle_register_driver(&at91_idle_driver);
 
 	device = &per_cpu(at91_cpuidle_device, smp_processor_id());
 	device->state_count = AT91_MAX_STATES;
-	driver->state_count = AT91_MAX_STATES;
 
 	/* Wait for interrupt state */
-	driver->states[0].enter = at91_enter_idle;
-	driver->states[0].exit_latency = 1;
-	driver->states[0].target_residency = 10000;
-	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(driver->states[0].name, "WFI");
-	strcpy(driver->states[0].desc, "Wait for interrupt");
+	device->states[0].enter = at91_enter_idle;
+	device->states[0].exit_latency = 1;
+	device->states[0].target_residency = 10000;
+	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(device->states[0].name, "WFI");
+	strcpy(device->states[0].desc, "Wait for interrupt");
 
 	/* Wait for interrupt and RAM self refresh state
*/ - driver->states[1].enter = at91_enter_idle; - driver->states[1].exit_latency = 10; - driver->states[1].target_residency = 10000; - driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; - strcpy(driver->states[1].name, "RAM_SR"); - strcpy(driver->states[1].desc, "WFI and RAM Self Refresh"); - - cpuidle_register_driver(&at91_idle_driver); + device->states[1].enter = at91_enter_idle; + device->states[1].exit_latency = 10; + device->states[1].target_residency = 10000; + device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; + strcpy(device->states[1].name, "RAM_SR"); + strcpy(device->states[1].desc, "WFI and RAM Self Refresh"); if (cpuidle_register_device(device)) { printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); diff --git a/trunk/arch/arm/mach-davinci/cpuidle.c b/trunk/arch/arm/mach-davinci/cpuidle.c index dbeeccd00173..bd59f31b8a95 100644 --- a/trunk/arch/arm/mach-davinci/cpuidle.c +++ b/trunk/arch/arm/mach-davinci/cpuidle.c @@ -78,11 +78,9 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = { /* Actual code that puts the SoC in different idle states */ static int davinci_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) + struct cpuidle_state *state) { - struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; - struct davinci_ops *ops = cpuidle_get_statedata(state_usage); + struct davinci_ops *ops = cpuidle_get_statedata(state); struct timeval before, after; int idle_time; @@ -100,17 +98,13 @@ static int davinci_enter_idle(struct cpuidle_device *dev, local_irq_enable(); idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); - - dev->last_residency = idle_time; - - return index; + return idle_time; } static int __init davinci_cpuidle_probe(struct platform_device *pdev) { int ret; struct cpuidle_device *device; - struct cpuidle_driver *driver = &davinci_idle_driver; struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); @@ -122,33 +116,32 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) ddr2_reg_base = pdata->ddr2_ctlr_base; + ret = cpuidle_register_driver(&davinci_idle_driver); + if (ret) { + dev_err(&pdev->dev, "failed to register driver\n"); + return ret; + } + /* Wait for interrupt state */ - driver->states[0].enter = davinci_enter_idle; - driver->states[0].exit_latency = 1; - driver->states[0].target_residency = 10000; - driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; - strcpy(driver->states[0].name, "WFI"); - strcpy(driver->states[0].desc, "Wait for interrupt"); + device->states[0].enter = davinci_enter_idle; + device->states[0].exit_latency = 1; + device->states[0].target_residency = 10000; + device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; + strcpy(device->states[0].name, "WFI"); + strcpy(device->states[0].desc, "Wait for interrupt"); /* Wait for interrupt and DDR self refresh state */ - driver->states[1].enter = davinci_enter_idle; - driver->states[1].exit_latency = 10; - driver->states[1].target_residency = 10000; - driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; - strcpy(driver->states[1].name, "DDR SR"); - strcpy(driver->states[1].desc, "WFI and DDR Self Refresh"); + device->states[1].enter = davinci_enter_idle; + device->states[1].exit_latency = 10; + device->states[1].target_residency = 10000; + device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; + strcpy(device->states[1].name, "DDR SR"); + strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); if 
(pdata->ddr2_pdown) davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; - cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]); + cpuidle_set_statedata(&device->states[1], &davinci_states[1]); device->state_count = DAVINCI_CPUIDLE_MAX_STATES; - driver->state_count = DAVINCI_CPUIDLE_MAX_STATES; - - ret = cpuidle_register_driver(&davinci_idle_driver); - if (ret) { - dev_err(&pdev->dev, "failed to register driver\n"); - return ret; - } ret = cpuidle_register_device(device); if (ret) { diff --git a/trunk/arch/arm/mach-exynos4/cpuidle.c b/trunk/arch/arm/mach-exynos4/cpuidle.c index 35f6502144ae..bf7e96f2793a 100644 --- a/trunk/arch/arm/mach-exynos4/cpuidle.c +++ b/trunk/arch/arm/mach-exynos4/cpuidle.c @@ -16,8 +16,7 @@ #include static int exynos4_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index); + struct cpuidle_state *state); static struct cpuidle_state exynos4_cpuidle_set[] = { [0] = { @@ -38,8 +37,7 @@ static struct cpuidle_driver exynos4_idle_driver = { }; static int exynos4_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) + struct cpuidle_state *state) { struct timeval before, after; int idle_time; @@ -54,31 +52,29 @@ static int exynos4_enter_idle(struct cpuidle_device *dev, idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); - dev->last_residency = idle_time; - return index; + return idle_time; } static int __init exynos4_init_cpuidle(void) { int i, max_cpuidle_state, cpu_id; struct cpuidle_device *device; - struct cpuidle_driver *drv = &exynos4_idle_driver; - - /* Setup cpuidle driver */ - drv->state_count = (sizeof(exynos4_cpuidle_set) / - sizeof(struct cpuidle_state)); - max_cpuidle_state = drv->state_count; - for (i = 0; i < max_cpuidle_state; i++) { - memcpy(&drv->states[i], &exynos4_cpuidle_set[i], - sizeof(struct cpuidle_state)); - } + cpuidle_register_driver(&exynos4_idle_driver); for_each_cpu(cpu_id, cpu_online_mask) { device = &per_cpu(exynos4_cpuidle_device, cpu_id); device->cpu = cpu_id; - device->state_count = drv->state_count; + device->state_count = (sizeof(exynos4_cpuidle_set) / + sizeof(struct cpuidle_state)); + + max_cpuidle_state = device->state_count; + + for (i = 0; i < max_cpuidle_state; i++) { + memcpy(&device->states[i], &exynos4_cpuidle_set[i], + sizeof(struct cpuidle_state)); + } if (cpuidle_register_device(device)) { printk(KERN_ERR "CPUidle register device failed\n,"); diff --git a/trunk/arch/arm/mach-kirkwood/cpuidle.c b/trunk/arch/arm/mach-kirkwood/cpuidle.c index ffd690dc3d33..f68d33f1f396 100644 --- a/trunk/arch/arm/mach-kirkwood/cpuidle.c +++ b/trunk/arch/arm/mach-kirkwood/cpuidle.c @@ -32,18 +32,17 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device); /* Actual code that puts the SoC in different idle states */ static int kirkwood_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) + struct cpuidle_state *state) { struct timeval before, after; int idle_time; local_irq_disable(); do_gettimeofday(&before); - if (index == 0) + if (state == &dev->states[0]) /* Wait for interrupt state */ cpu_do_idle(); - else if (index == 1) { + else if (state == &dev->states[1]) { /* * Following write will put DDR in self refresh. 
* Note that we have 256 cycles before DDR puts it @@ -58,40 +57,35 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev, local_irq_enable(); idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); - - /* Update last residency */ - dev->last_residency = idle_time; - - return index; + return idle_time; } /* Initialize CPU idle by registering the idle states */ static int kirkwood_init_cpuidle(void) { struct cpuidle_device *device; - struct cpuidle_driver *driver = &kirkwood_idle_driver; + + cpuidle_register_driver(&kirkwood_idle_driver); device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); device->state_count = KIRKWOOD_MAX_STATES; - driver->state_count = KIRKWOOD_MAX_STATES; /* Wait for interrupt state */ - driver->states[0].enter = kirkwood_enter_idle; - driver->states[0].exit_latency = 1; - driver->states[0].target_residency = 10000; - driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; - strcpy(driver->states[0].name, "WFI"); - strcpy(driver->states[0].desc, "Wait for interrupt"); + device->states[0].enter = kirkwood_enter_idle; + device->states[0].exit_latency = 1; + device->states[0].target_residency = 10000; + device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; + strcpy(device->states[0].name, "WFI"); + strcpy(device->states[0].desc, "Wait for interrupt"); /* Wait for interrupt and DDR self refresh state */ - driver->states[1].enter = kirkwood_enter_idle; - driver->states[1].exit_latency = 10; - driver->states[1].target_residency = 10000; - driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; - strcpy(driver->states[1].name, "DDR SR"); - strcpy(driver->states[1].desc, "WFI and DDR Self Refresh"); + device->states[1].enter = kirkwood_enter_idle; + device->states[1].exit_latency = 10; + device->states[1].target_residency = 10000; + device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; + strcpy(device->states[1].name, "DDR SR"); + strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); - cpuidle_register_driver(&kirkwood_idle_driver); if (cpuidle_register_device(device)) { printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); return -EIO; diff --git a/trunk/arch/arm/mach-omap2/cpuidle34xx.c b/trunk/arch/arm/mach-omap2/cpuidle34xx.c index 1fe35c24fba2..4bf6e6e8b100 100644 --- a/trunk/arch/arm/mach-omap2/cpuidle34xx.c +++ b/trunk/arch/arm/mach-omap2/cpuidle34xx.c @@ -88,21 +88,17 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm, /** * omap3_enter_idle - Programs OMAP3 to enter the specified state * @dev: cpuidle device - * @drv: cpuidle driver - * @index: the index of state to be entered + * @state: The target state to be programmed * * Called from the CPUidle framework to program the device to the * specified target state selected by the governor. 
*/ static int omap3_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) + struct cpuidle_state *state) { - struct omap3_idle_statedata *cx = - cpuidle_get_statedata(&dev->states_usage[index]); + struct omap3_idle_statedata *cx = cpuidle_get_statedata(state); struct timespec ts_preidle, ts_postidle, ts_idle; u32 mpu_state = cx->mpu_state, core_state = cx->core_state; - int idle_time; /* Used to keep track of the total time in idle */ getnstimeofday(&ts_preidle); @@ -117,7 +113,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, goto return_sleep_time; /* Deny idle for C1 */ - if (index == 0) { + if (state == &dev->states[0]) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); } @@ -126,7 +122,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, omap_sram_idle(); /* Re-allow idle for C1 */ - if (index == 0) { + if (state == &dev->states[0]) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); } @@ -138,38 +134,28 @@ static int omap3_enter_idle(struct cpuidle_device *dev, local_irq_enable(); local_fiq_enable(); - idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \ - USEC_PER_SEC; - - /* Update cpuidle counters */ - dev->last_residency = idle_time; - - return index; + return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC; } /** * next_valid_state - Find next valid C-state * @dev: cpuidle device - * @drv: cpuidle driver - * @index: Index of currently selected c-state + * @state: Currently selected C-state * - * If the state corresponding to index is valid, index is returned back - * to the caller. Else, this function searches for a lower c-state which is - * still valid (as defined in omap3_power_states[]) and returns its index. + * If the current state is valid, it is returned back to the caller. + * Else, this function searches for a lower c-state which is still + * valid. * * A state is valid if the 'valid' field is enabled and * if it satisfies the enable_off_mode condition. */ -static int next_valid_state(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) +static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, + struct cpuidle_state *curr) { - struct cpuidle_state_usage *curr_usage = &dev->states_usage[index]; - struct cpuidle_state *curr = &drv->states[index]; - struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage); + struct cpuidle_state *next = NULL; + struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr); u32 mpu_deepest_state = PWRDM_POWER_RET; u32 core_deepest_state = PWRDM_POWER_RET; - int next_index = -1; if (enable_off_mode) { mpu_deepest_state = PWRDM_POWER_OFF; @@ -186,20 +172,20 @@ static int next_valid_state(struct cpuidle_device *dev, if ((cx->valid) && (cx->mpu_state >= mpu_deepest_state) && (cx->core_state >= core_deepest_state)) { - return index; + return curr; } else { int idx = OMAP3_NUM_STATES - 1; /* Reach the current state starting at highest C-state */ for (; idx >= 0; idx--) { - if (&drv->states[idx] == curr) { - next_index = idx; + if (&dev->states[idx] == curr) { + next = &dev->states[idx]; break; } } /* Should never hit this condition */ - WARN_ON(next_index == -1); + WARN_ON(next == NULL); /* * Drop to next valid state. 
@@ -207,44 +193,41 @@ static int next_valid_state(struct cpuidle_device *dev, */ idx--; for (; idx >= 0; idx--) { - cx = cpuidle_get_statedata(&dev->states_usage[idx]); + cx = cpuidle_get_statedata(&dev->states[idx]); if ((cx->valid) && (cx->mpu_state >= mpu_deepest_state) && (cx->core_state >= core_deepest_state)) { - next_index = idx; + next = &dev->states[idx]; break; } } /* * C1 is always valid. - * So, no need to check for 'next_index == -1' outside - * this loop. + * So, no need to check for 'next==NULL' outside this loop. */ } - return next_index; + return next; } /** * omap3_enter_idle_bm - Checks for any bus activity * @dev: cpuidle device - * @drv: cpuidle driver - * @index: array index of target state to be programmed + * @state: The target state to be programmed * * This function checks for any pending activity and then programs * the device to the specified or a safer state. */ static int omap3_enter_idle_bm(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) + struct cpuidle_state *state) { - int new_state_idx; + struct cpuidle_state *new_state; u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state; struct omap3_idle_statedata *cx; int ret; if (!omap3_can_sleep()) { - new_state_idx = drv->safe_state_index; + new_state = dev->safe_state; goto select_state; } @@ -254,7 +237,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, */ cam_state = pwrdm_read_pwrst(cam_pd); if (cam_state == PWRDM_POWER_ON) { - new_state_idx = drv->safe_state_index; + new_state = dev->safe_state; goto select_state; } @@ -270,7 +253,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, * Prevent PER off if CORE is not in retention or off as this * would disable PER wakeups completely. */ - cx = cpuidle_get_statedata(&dev->states_usage[index]); + cx = cpuidle_get_statedata(state); core_next_state = cx->core_state; per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); if ((per_next_state == PWRDM_POWER_OFF) && @@ -281,10 +264,11 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, if (per_next_state != per_saved_state) pwrdm_set_next_pwrst(per_pd, per_next_state); - new_state_idx = next_valid_state(dev, drv, index); + new_state = next_valid_state(dev, state); select_state: - ret = omap3_enter_idle(dev, drv, new_state_idx); + dev->last_state = new_state; + ret = omap3_enter_idle(dev, new_state); /* Restore original PER state if it was modified */ if (per_next_state != per_saved_state) @@ -317,31 +301,22 @@ struct cpuidle_driver omap3_idle_driver = { .owner = THIS_MODULE, }; -/* Helper to fill the C-state common data*/ -static inline void _fill_cstate(struct cpuidle_driver *drv, +/* Helper to fill the C-state common data and register the driver_data */ +static inline struct omap3_idle_statedata *_fill_cstate( + struct cpuidle_device *dev, int idx, const char *descr) { - struct cpuidle_state *state = &drv->states[idx]; + struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; + struct cpuidle_state *state = &dev->states[idx]; state->exit_latency = cpuidle_params_table[idx].exit_latency; state->target_residency = cpuidle_params_table[idx].target_residency; state->flags = CPUIDLE_FLAG_TIME_VALID; state->enter = omap3_enter_idle_bm; + cx->valid = cpuidle_params_table[idx].valid; sprintf(state->name, "C%d", idx + 1); strncpy(state->desc, descr, CPUIDLE_DESC_LEN); - -} - -/* Helper to register the driver_data */ -static inline struct omap3_idle_statedata *_fill_cstate_usage( - struct cpuidle_device *dev, - int idx) -{ - struct 
omap3_idle_statedata *cx = &omap3_idle_data[idx]; - struct cpuidle_state_usage *state_usage = &dev->states_usage[idx]; - - cx->valid = cpuidle_params_table[idx].valid; - cpuidle_set_statedata(state_usage, cx); + cpuidle_set_statedata(state, cx); return cx; } @@ -355,7 +330,6 @@ static inline struct omap3_idle_statedata *_fill_cstate_usage( int __init omap3_idle_init(void) { struct cpuidle_device *dev; - struct cpuidle_driver *drv = &omap3_idle_driver; struct omap3_idle_statedata *cx; mpu_pd = pwrdm_lookup("mpu_pwrdm"); @@ -363,52 +337,44 @@ int __init omap3_idle_init(void) per_pd = pwrdm_lookup("per_pwrdm"); cam_pd = pwrdm_lookup("cam_pwrdm"); - - drv->safe_state_index = -1; + cpuidle_register_driver(&omap3_idle_driver); dev = &per_cpu(omap3_idle_dev, smp_processor_id()); /* C1 . MPU WFI + Core active */ - _fill_cstate(drv, 0, "MPU ON + CORE ON"); - (&drv->states[0])->enter = omap3_enter_idle; - drv->safe_state_index = 0; - cx = _fill_cstate_usage(dev, 0); + cx = _fill_cstate(dev, 0, "MPU ON + CORE ON"); + (&dev->states[0])->enter = omap3_enter_idle; + dev->safe_state = &dev->states[0]; cx->valid = 1; /* C1 is always valid */ cx->mpu_state = PWRDM_POWER_ON; cx->core_state = PWRDM_POWER_ON; /* C2 . MPU WFI + Core inactive */ - _fill_cstate(drv, 1, "MPU ON + CORE ON"); - cx = _fill_cstate_usage(dev, 1); + cx = _fill_cstate(dev, 1, "MPU ON + CORE ON"); cx->mpu_state = PWRDM_POWER_ON; cx->core_state = PWRDM_POWER_ON; /* C3 . MPU CSWR + Core inactive */ - _fill_cstate(drv, 2, "MPU RET + CORE ON"); - cx = _fill_cstate_usage(dev, 2); + cx = _fill_cstate(dev, 2, "MPU RET + CORE ON"); cx->mpu_state = PWRDM_POWER_RET; cx->core_state = PWRDM_POWER_ON; /* C4 . MPU OFF + Core inactive */ - _fill_cstate(drv, 3, "MPU OFF + CORE ON"); - cx = _fill_cstate_usage(dev, 3); + cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON"); cx->mpu_state = PWRDM_POWER_OFF; cx->core_state = PWRDM_POWER_ON; /* C5 . MPU RET + Core RET */ - _fill_cstate(drv, 4, "MPU RET + CORE RET"); - cx = _fill_cstate_usage(dev, 4); + cx = _fill_cstate(dev, 4, "MPU RET + CORE RET"); cx->mpu_state = PWRDM_POWER_RET; cx->core_state = PWRDM_POWER_RET; /* C6 . MPU OFF + Core RET */ - _fill_cstate(drv, 5, "MPU OFF + CORE RET"); - cx = _fill_cstate_usage(dev, 5); + cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET"); cx->mpu_state = PWRDM_POWER_OFF; cx->core_state = PWRDM_POWER_RET; /* C7 . MPU OFF + Core OFF */ - _fill_cstate(drv, 6, "MPU OFF + CORE OFF"); - cx = _fill_cstate_usage(dev, 6); + cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF"); /* * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot * enable OFF mode in a stable form for previous revisions. 
@@ -422,9 +388,6 @@ int __init omap3_idle_init(void) cx->mpu_state = PWRDM_POWER_OFF; cx->core_state = PWRDM_POWER_OFF; - drv->state_count = OMAP3_NUM_STATES; - cpuidle_register_driver(&omap3_idle_driver); - dev->state_count = OMAP3_NUM_STATES; if (cpuidle_register_device(dev)) { printk(KERN_ERR "%s: CPUidle register device failed\n", diff --git a/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c b/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c index ad1012ad6b42..e4469e7233cb 100644 --- a/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -25,12 +25,11 @@ static unsigned long cpuidle_mode[] = { }; static int cpuidle_sleep_enter(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) + struct cpuidle_state *state) { unsigned long allowed_mode = arch_hwblk_sleep_mode(); ktime_t before, after; - int requested_state = index; + int requested_state = state - &dev->states[0]; int allowed_state; int k; @@ -47,13 +46,11 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev, */ k = min_t(int, allowed_state, requested_state); + dev->last_state = &dev->states[k]; before = ktime_get(); sh_mobile_call_standby(cpuidle_mode[k]); after = ktime_get(); - - dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10; - - return k; + return ktime_to_ns(ktime_sub(after, before)) >> 10; } static struct cpuidle_device cpuidle_dev; @@ -65,19 +62,19 @@ static struct cpuidle_driver cpuidle_driver = { void sh_mobile_setup_cpuidle(void) { struct cpuidle_device *dev = &cpuidle_dev; - struct cpuidle_driver *drv = &cpuidle_driver; struct cpuidle_state *state; int i; + cpuidle_register_driver(&cpuidle_driver); for (i = 0; i < CPUIDLE_STATE_MAX; i++) { - drv->states[i].name[0] = '\0'; - drv->states[i].desc[0] = '\0'; + dev->states[i].name[0] = '\0'; + dev->states[i].desc[0] = '\0'; } i = CPUIDLE_DRIVER_STATE_START; - state = &drv->states[i++]; + state = &dev->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); state->exit_latency = 1; @@ -87,10 +84,10 @@ void sh_mobile_setup_cpuidle(void) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; - drv->safe_state_index = i-1; + dev->safe_state = state; if (sh_mobile_sleep_supported & SUSP_SH_SF) { - state = &drv->states[i++]; + state = &dev->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN); @@ -103,7 +100,7 @@ void sh_mobile_setup_cpuidle(void) } if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { - state = &drv->states[i++]; + state = &dev->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN); @@ -115,10 +112,7 @@ void sh_mobile_setup_cpuidle(void) state->enter = cpuidle_sleep_enter; } - drv->state_count = i; dev->state_count = i; - cpuidle_register_driver(&cpuidle_driver); - cpuidle_register_device(dev); } diff --git a/trunk/arch/x86/platform/mrst/pmu.c b/trunk/arch/x86/platform/mrst/pmu.c index 9281da7d91bd..c0ac06da57ac 100644 --- a/trunk/arch/x86/platform/mrst/pmu.c +++ b/trunk/arch/x86/platform/mrst/pmu.c @@ -70,7 +70,7 @@ static struct mrst_device mrst_devs[] = { /* 24 */ { 0x4110, 0 }, /* Lincroft */ }; -/* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */ +/* n.b. 
We ignore PCI-id 0x815 in LSS9 b/c Linux has no driver for it */ static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0}; static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803, 0x0804, 0x0805, 0x080f, 0}; diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c index 9d7bc9f6b6cc..a4e0f1ba6040 100644 --- a/trunk/drivers/acpi/processor_driver.c +++ b/trunk/drivers/acpi/processor_driver.c @@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, if (action == CPU_ONLINE && pr) { acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_hotplug(pr); + acpi_processor_cst_has_changed(pr); acpi_processor_reevaluate_tstate(pr, action); acpi_processor_tstate_has_changed(pr); } @@ -503,7 +503,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) acpi_processor_get_throttling_info(pr); acpi_processor_get_limit_info(pr); - if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) + + if (cpuidle_get_driver() == &acpi_idle_driver) acpi_processor_power_init(pr, device); pr->cdev = thermal_cooling_device_register("Processor", device, @@ -799,9 +800,17 @@ static int __init acpi_processor_init(void) memset(&errata, 0, sizeof(errata)); + if (!cpuidle_register_driver(&acpi_idle_driver)) { + printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", + acpi_idle_driver.name); + } else { + printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", + cpuidle_get_driver()->name); + } + result = acpi_bus_register_driver(&acpi_processor_driver); if (result < 0) - return result; + goto out_cpuidle; acpi_processor_install_hotplug_notify(); @@ -812,6 +821,11 @@ static int __init acpi_processor_init(void) acpi_processor_throttling_init(); return 0; + +out_cpuidle: + cpuidle_unregister_driver(&acpi_idle_driver); + + return result; } static void __exit acpi_processor_exit(void) diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c index 24fe3afa7119..431ab11c8c1b 100644 --- a/trunk/drivers/acpi/processor_idle.c +++ b/trunk/drivers/acpi/processor_idle.c @@ -741,25 +741,22 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) /** * acpi_idle_enter_c1 - enters an ACPI C1 state-type * @dev: the target CPU - * @drv: cpuidle driver containing cpuidle state info - * @index: index of target state + * @state: the state data * * This is equivalent to the HALT instruction. 
*/ static int acpi_idle_enter_c1(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) + struct cpuidle_state *state) { ktime_t kt1, kt2; s64 idle_time; struct acpi_processor *pr; - struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; - struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); pr = __this_cpu_read(processors); - dev->last_residency = 0; if (unlikely(!pr)) - return -EINVAL; + return 0; local_irq_disable(); @@ -767,7 +764,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, if (acpi_idle_suspend) { local_irq_enable(); cpu_relax(); - return -EINVAL; + return 0; } lapic_timer_state_broadcast(pr, cx, 1); @@ -776,46 +773,36 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, kt2 = ktime_get_real(); idle_time = ktime_to_us(ktime_sub(kt2, kt1)); - /* Update device last_residency*/ - dev->last_residency = (int)idle_time; - local_irq_enable(); cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); - return index; + return idle_time; } /** * acpi_idle_enter_simple - enters an ACPI state without BM handling * @dev: the target CPU - * @drv: cpuidle driver with cpuidle state information - * @index: the index of suggested state + * @state: the state data */ static int acpi_idle_enter_simple(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) + struct cpuidle_state *state) { struct acpi_processor *pr; - struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; - struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); - dev->last_residency = 0; if (unlikely(!pr)) - return -EINVAL; - - local_irq_disable(); + return 0; - if (acpi_idle_suspend) { - local_irq_enable(); - cpu_relax(); - return -EINVAL; - } + if (acpi_idle_suspend) + return(acpi_idle_enter_c1(dev, state)); + local_irq_disable(); if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; @@ -828,7 +815,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); - return -EINVAL; + return 0; } } @@ -850,9 +837,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); - /* Update device last_residency*/ - dev->last_residency = (int)idle_time; - /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); @@ -864,7 +848,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; - return index; + return idle_time; } static int c3_cpu_count; @@ -873,43 +857,37 @@ static DEFINE_SPINLOCK(c3_lock); /** * acpi_idle_enter_bm - enters C3 with proper BM handling * @dev: the target CPU - * @drv: cpuidle driver containing state data - * @index: the index of suggested state + * @state: the state data * * If BM is detected, the deepest non-C3 idle state is entered instead. 
*/ static int acpi_idle_enter_bm(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) + struct cpuidle_state *state) { struct acpi_processor *pr; - struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; - struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); - dev->last_residency = 0; if (unlikely(!pr)) - return -EINVAL; - + return 0; - if (acpi_idle_suspend) { - cpu_relax(); - return -EINVAL; - } + if (acpi_idle_suspend) + return(acpi_idle_enter_c1(dev, state)); if (!cx->bm_sts_skip && acpi_idle_bm_check()) { - if (drv->safe_state_index >= 0) { - return drv->states[drv->safe_state_index].enter(dev, - drv, drv->safe_state_index); + if (dev->safe_state) { + dev->last_state = dev->safe_state; + return dev->safe_state->enter(dev, dev->safe_state); } else { local_irq_disable(); acpi_safe_halt(); local_irq_enable(); - return -EINVAL; + return 0; } } @@ -926,7 +904,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); - return -EINVAL; + return 0; } } @@ -976,9 +954,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); - /* Update device last_residency*/ - dev->last_residency = (int)idle_time; - /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); @@ -990,7 +965,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; - return index; + return idle_time; } struct cpuidle_driver acpi_idle_driver = { @@ -999,16 +974,14 @@ struct cpuidle_driver acpi_idle_driver = { }; /** - * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE - * device i.e. per-cpu data - * + * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE * @pr: the ACPI processor */ -static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) +static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) { int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; - struct cpuidle_state_usage *state_usage; + struct cpuidle_state *state; struct cpuidle_device *dev = &pr->power.dev; if (!pr->flags.power_setup_done) @@ -1019,62 +992,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) } dev->cpu = pr->id; - - if (max_cstate == 0) - max_cstate = 1; - - for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { - cx = &pr->power.states[i]; - state_usage = &dev->states_usage[count]; - - if (!cx->valid) - continue; - -#ifdef CONFIG_HOTPLUG_CPU - if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && - !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) - continue; -#endif - - cpuidle_set_statedata(state_usage, cx); - - count++; - if (count == CPUIDLE_STATE_MAX) - break; - } - - dev->state_count = count; - - if (!count) - return -EINVAL; - - return 0; -} - -/** - * acpi_processor_setup_cpuidle states- prepares and configures cpuidle - * global state data i.e. 
idle routines - * - * @pr: the ACPI processor - */ -static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) -{ - int i, count = CPUIDLE_DRIVER_STATE_START; - struct acpi_processor_cx *cx; - struct cpuidle_state *state; - struct cpuidle_driver *drv = &acpi_idle_driver; - - if (!pr->flags.power_setup_done) - return -EINVAL; - - if (pr->flags.power == 0) - return -EINVAL; - - drv->safe_state_index = -1; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { - drv->states[i].name[0] = '\0'; - drv->states[i].desc[0] = '\0'; + dev->states[i].name[0] = '\0'; + dev->states[i].desc[0] = '\0'; } if (max_cstate == 0) @@ -1082,6 +1002,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { cx = &pr->power.states[i]; + state = &dev->states[count]; if (!cx->valid) continue; @@ -1092,8 +1013,8 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) continue; #endif + cpuidle_set_statedata(state, cx); - state = &drv->states[count]; snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); state->exit_latency = cx->latency; @@ -1106,13 +1027,13 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_c1; - drv->safe_state_index = count; + dev->safe_state = state; break; case ACPI_STATE_C2: state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_simple; - drv->safe_state_index = count; + dev->safe_state = state; break; case ACPI_STATE_C3: @@ -1128,7 +1049,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) break; } - drv->state_count = count; + dev->state_count = count; if (!count) return -EINVAL; @@ -1136,7 +1057,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) return 0; } -int acpi_processor_hotplug(struct acpi_processor *pr) +int acpi_processor_cst_has_changed(struct acpi_processor *pr) { int ret = 0; @@ -1157,7 +1078,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr) cpuidle_disable_device(&pr->power.dev); acpi_processor_get_power_info(pr); if (pr->flags.power) { - acpi_processor_setup_cpuidle_cx(pr); + acpi_processor_setup_cpuidle(pr); ret = cpuidle_enable_device(&pr->power.dev); } cpuidle_resume_and_unlock(); @@ -1165,72 +1086,10 @@ int acpi_processor_hotplug(struct acpi_processor *pr) return ret; } -int acpi_processor_cst_has_changed(struct acpi_processor *pr) -{ - int cpu; - struct acpi_processor *_pr; - - if (disabled_by_idle_boot_param()) - return 0; - - if (!pr) - return -EINVAL; - - if (nocst) - return -ENODEV; - - if (!pr->flags.power_setup_done) - return -ENODEV; - - /* - * FIXME: Design the ACPI notification to make it once per - * system instead of once per-cpu. This condition is a hack - * to make the code that updates C-States be called once. 
- */ - - if (smp_processor_id() == 0 && - cpuidle_get_driver() == &acpi_idle_driver) { - - cpuidle_pause_and_lock(); - /* Protect against cpu-hotplug */ - get_online_cpus(); - - /* Disable all cpuidle devices */ - for_each_online_cpu(cpu) { - _pr = per_cpu(processors, cpu); - if (!_pr || !_pr->flags.power_setup_done) - continue; - cpuidle_disable_device(&_pr->power.dev); - } - - /* Populate Updated C-state information */ - acpi_processor_setup_cpuidle_states(pr); - - /* Enable all cpuidle devices */ - for_each_online_cpu(cpu) { - _pr = per_cpu(processors, cpu); - if (!_pr || !_pr->flags.power_setup_done) - continue; - acpi_processor_get_power_info(_pr); - if (_pr->flags.power) { - acpi_processor_setup_cpuidle_cx(_pr); - cpuidle_enable_device(&_pr->power.dev); - } - } - put_online_cpus(); - cpuidle_resume_and_unlock(); - } - - return 0; -} - -static int acpi_processor_registered; - int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device) { acpi_status status = 0; - int retval; static int first_run; if (disabled_by_idle_boot_param()) @@ -1267,26 +1126,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, * platforms that only support C1. */ if (pr->flags.power) { - /* Register acpi_idle_driver if not already registered */ - if (!acpi_processor_registered) { - acpi_processor_setup_cpuidle_states(pr); - retval = cpuidle_register_driver(&acpi_idle_driver); - if (retval) - return retval; - printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", - acpi_idle_driver.name); - } - /* Register per-cpu cpuidle_device. Cpuidle driver - * must already be registered before registering device - */ - acpi_processor_setup_cpuidle_cx(pr); - retval = cpuidle_register_device(&pr->power.dev); - if (retval) { - if (acpi_processor_registered == 0) - cpuidle_unregister_driver(&acpi_idle_driver); - return retval; - } - acpi_processor_registered++; + acpi_processor_setup_cpuidle(pr); + if (cpuidle_register_device(&pr->power.dev)) + return -EIO; } return 0; } @@ -1297,13 +1139,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr, if (disabled_by_idle_boot_param()) return 0; - if (pr->flags.power) { - cpuidle_unregister_device(&pr->power.dev); - acpi_processor_registered--; - if (acpi_processor_registered == 0) - cpuidle_unregister_driver(&acpi_idle_driver); - } - + cpuidle_unregister_device(&pr->power.dev); pr->flags.power_setup_done = 0; + return 0; } diff --git a/trunk/drivers/cpuidle/cpuidle.c b/trunk/drivers/cpuidle/cpuidle.c index 7a57b11eaa8d..d4c542372886 100644 --- a/trunk/drivers/cpuidle/cpuidle.c +++ b/trunk/drivers/cpuidle/cpuidle.c @@ -61,9 +61,8 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); int cpuidle_idle_call(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); - struct cpuidle_driver *drv = cpuidle_get_driver(); struct cpuidle_state *target_state; - int next_state, entered_state; + int next_state; if (off) return -ENODEV; @@ -84,36 +83,45 @@ int cpuidle_idle_call(void) hrtimer_peek_ahead_timers(); #endif + /* + * Call the device's prepare function before calling the + * governor's select function. ->prepare gives the device's + * cpuidle driver a chance to update any dynamic information + * of its cpuidle states for the current idle period, e.g. + * state availability, latencies, residencies, etc. 
+ */ + if (dev->prepare) + dev->prepare(dev); + /* ask the governor for the next state */ - next_state = cpuidle_curr_governor->select(drv, dev); + next_state = cpuidle_curr_governor->select(dev); if (need_resched()) { local_irq_enable(); return 0; } - target_state = &drv->states[next_state]; + target_state = &dev->states[next_state]; + + /* enter the state and update stats */ + dev->last_state = target_state; trace_power_start(POWER_CSTATE, next_state, dev->cpu); trace_cpu_idle(next_state, dev->cpu); - entered_state = target_state->enter(dev, drv, next_state); + dev->last_residency = target_state->enter(dev, target_state); trace_power_end(dev->cpu); trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); - if (entered_state >= 0) { - /* Update cpuidle counters */ - /* This can be moved to within driver enter routine - * but that results in multiple copies of same code. - */ - dev->states_usage[entered_state].time += - (unsigned long long)dev->last_residency; - dev->states_usage[entered_state].usage++; - } + if (dev->last_state) + target_state = dev->last_state; + + target_state->time += (unsigned long long)dev->last_residency; + target_state->usage++; /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) - cpuidle_curr_governor->reflect(dev, entered_state); + cpuidle_curr_governor->reflect(dev); return 0; } @@ -164,11 +172,11 @@ void cpuidle_resume_and_unlock(void) EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); #ifdef CONFIG_ARCH_HAS_CPU_RELAX -static int poll_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) +static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) { ktime_t t1, t2; s64 diff; + int ret; t1 = ktime_get(); local_irq_enable(); @@ -180,14 +188,15 @@ static int poll_idle(struct cpuidle_device *dev, if (diff > INT_MAX) diff = INT_MAX; - dev->last_residency = (int) diff; - - return index; + ret = (int) diff; + return ret; } -static void poll_idle_init(struct cpuidle_driver *drv) +static void poll_idle_init(struct cpuidle_device *dev) { - struct cpuidle_state *state = &drv->states[0]; + struct cpuidle_state *state = &dev->states[0]; + + cpuidle_set_statedata(state, NULL); snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); @@ -198,7 +207,7 @@ static void poll_idle_init(struct cpuidle_driver *drv) state->enter = poll_idle; } #else -static void poll_idle_init(struct cpuidle_driver *drv) {} +static void poll_idle_init(struct cpuidle_device *dev) {} #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ /** @@ -225,20 +234,21 @@ int cpuidle_enable_device(struct cpuidle_device *dev) return ret; } - poll_idle_init(cpuidle_get_driver()); + poll_idle_init(dev); if ((ret = cpuidle_add_state_sysfs(dev))) return ret; if (cpuidle_curr_governor->enable && - (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev))) + (ret = cpuidle_curr_governor->enable(dev))) goto fail_sysfs; for (i = 0; i < dev->state_count; i++) { - dev->states_usage[i].usage = 0; - dev->states_usage[i].time = 0; + dev->states[i].usage = 0; + dev->states[i].time = 0; } dev->last_residency = 0; + dev->last_state = NULL; smp_wmb(); @@ -272,7 +282,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) dev->enabled = 0; if (cpuidle_curr_governor->disable) - cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); + cpuidle_curr_governor->disable(dev); cpuidle_remove_state_sysfs(dev); enabled_devices--; @@ -300,6 +310,26 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) 
init_completion(&dev->kobj_unregister); + /* + * cpuidle driver should set the dev->power_specified bit + * before registering the device if the driver provides + * power_usage numbers. + * + * For those devices whose ->power_specified is not set, + * we fill in power_usage with decreasing values as the + * cpuidle code has an implicit assumption that state Cn + * uses less power than C(n-1). + * + * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned + * an power value of -1. So we use -2, -3, etc, for other + * c-states. + */ + if (!dev->power_specified) { + int i; + for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) + dev->states[i].power_usage = -1 - i; + } + per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); if ((ret = cpuidle_add_sysfs(sys_dev))) { diff --git a/trunk/drivers/cpuidle/driver.c b/trunk/drivers/cpuidle/driver.c index 284d7af5a9c8..3f7e3cedd133 100644 --- a/trunk/drivers/cpuidle/driver.c +++ b/trunk/drivers/cpuidle/driver.c @@ -17,30 +17,6 @@ static struct cpuidle_driver *cpuidle_curr_driver; DEFINE_SPINLOCK(cpuidle_driver_lock); -static void __cpuidle_register_driver(struct cpuidle_driver *drv) -{ - int i; - /* - * cpuidle driver should set the drv->power_specified bit - * before registering if the driver provides - * power_usage numbers. - * - * If power_specified is not set, - * we fill in power_usage with decreasing values as the - * cpuidle code has an implicit assumption that state Cn - * uses less power than C(n-1). - * - * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned - * an power value of -1. So we use -2, -3, etc, for other - * c-states. - */ - if (!drv->power_specified) { - for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) - drv->states[i].power_usage = -1 - i; - } -} - - /** * cpuidle_register_driver - registers a driver * @drv: the driver @@ -58,7 +34,6 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) spin_unlock(&cpuidle_driver_lock); return -EBUSY; } - __cpuidle_register_driver(drv); cpuidle_curr_driver = drv; spin_unlock(&cpuidle_driver_lock); diff --git a/trunk/drivers/cpuidle/governors/ladder.c b/trunk/drivers/cpuidle/governors/ladder.c index ef6b9e4727a7..12c98900dcf8 100644 --- a/trunk/drivers/cpuidle/governors/ladder.c +++ b/trunk/drivers/cpuidle/governors/ladder.c @@ -60,11 +60,9 @@ static inline void ladder_do_selection(struct ladder_device *ldev, /** * ladder_select_state - selects the next state to enter - * @drv: cpuidle driver * @dev: the CPU */ -static int ladder_select_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev) +static int ladder_select_state(struct cpuidle_device *dev) { struct ladder_device *ldev = &__get_cpu_var(ladder_devices); struct ladder_device_state *last_state; @@ -79,17 +77,15 @@ static int ladder_select_state(struct cpuidle_driver *drv, last_state = &ldev->states[last_idx]; - if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) { - last_residency = cpuidle_get_last_residency(dev) - \ - drv->states[last_idx].exit_latency; - } + if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) + last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; else last_residency = last_state->threshold.promotion_time + 1; /* consider promotion */ - if (last_idx < drv->state_count - 1 && + if (last_idx < dev->state_count - 1 && last_residency > last_state->threshold.promotion_time && - drv->states[last_idx + 1].exit_latency <= latency_req) { + dev->states[last_idx + 1].exit_latency <= latency_req) { 
last_state->stats.promotion_count++; last_state->stats.demotion_count = 0; if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { @@ -100,11 +96,11 @@ static int ladder_select_state(struct cpuidle_driver *drv, /* consider demotion */ if (last_idx > CPUIDLE_DRIVER_STATE_START && - drv->states[last_idx].exit_latency > latency_req) { + dev->states[last_idx].exit_latency > latency_req) { int i; for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { - if (drv->states[i].exit_latency <= latency_req) + if (dev->states[i].exit_latency <= latency_req) break; } ladder_do_selection(ldev, last_idx, i); @@ -127,11 +123,9 @@ static int ladder_select_state(struct cpuidle_driver *drv, /** * ladder_enable_device - setup for the governor - * @drv: cpuidle driver * @dev: the CPU */ -static int ladder_enable_device(struct cpuidle_driver *drv, - struct cpuidle_device *dev) +static int ladder_enable_device(struct cpuidle_device *dev) { int i; struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); @@ -140,8 +134,8 @@ static int ladder_enable_device(struct cpuidle_driver *drv, ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; - for (i = 0; i < drv->state_count; i++) { - state = &drv->states[i]; + for (i = 0; i < dev->state_count; i++) { + state = &dev->states[i]; lstate = &ldev->states[i]; lstate->stats.promotion_count = 0; @@ -150,7 +144,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv, lstate->threshold.promotion_count = PROMOTION_COUNT; lstate->threshold.demotion_count = DEMOTION_COUNT; - if (i < drv->state_count - 1) + if (i < dev->state_count - 1) lstate->threshold.promotion_time = state->exit_latency; if (i > 0) lstate->threshold.demotion_time = state->exit_latency; @@ -159,24 +153,11 @@ static int ladder_enable_device(struct cpuidle_driver *drv, return 0; } -/** - * ladder_reflect - update the correct last_state_idx - * @dev: the CPU - * @index: the index of actual state entered - */ -static void ladder_reflect(struct cpuidle_device *dev, int index) -{ - struct ladder_device *ldev = &__get_cpu_var(ladder_devices); - if (index > 0) - ldev->last_state_idx = index; -} - static struct cpuidle_governor ladder_governor = { .name = "ladder", .rating = 10, .enable = ladder_enable_device, .select = ladder_select_state, - .reflect = ladder_reflect, .owner = THIS_MODULE, }; diff --git a/trunk/drivers/cpuidle/governors/menu.c b/trunk/drivers/cpuidle/governors/menu.c index bcbe88142135..c47f3d09c1ee 100644 --- a/trunk/drivers/cpuidle/governors/menu.c +++ b/trunk/drivers/cpuidle/governors/menu.c @@ -182,7 +182,7 @@ static inline int performance_multiplier(void) static DEFINE_PER_CPU(struct menu_device, menu_devices); -static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); +static void menu_update(struct cpuidle_device *dev); /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ static u64 div_round64(u64 dividend, u32 divisor) @@ -228,10 +228,9 @@ static void detect_repeating_patterns(struct menu_device *data) /** * menu_select - selects the next idle state to enter - * @drv: cpuidle driver containing state data * @dev: the CPU */ -static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) +static int menu_select(struct cpuidle_device *dev) { struct menu_device *data = &__get_cpu_var(menu_devices); int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); @@ -241,7 +240,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) struct timespec t; if (data->needs_update) { - 
menu_update(drv, dev);
+		menu_update(dev);
 		data->needs_update = 0;
 	}
 
@@ -286,9 +285,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
-	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
-		struct cpuidle_state *s = &drv->states[i];
+	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
+		struct cpuidle_state *s = &dev->states[i];
 
+		if (s->flags & CPUIDLE_FLAG_IGNORE)
+			continue;
 		if (s->target_residency > data->predicted_us)
 			continue;
 		if (s->exit_latency > latency_req)
@@ -309,30 +310,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 /**
  * menu_reflect - records that data structures need update
  * @dev: the CPU
- * @index: the index of actual entered state
  *
  * NOTE: it's important to be fast here because this operation will add to
  * the overall exit latency.
  */
-static void menu_reflect(struct cpuidle_device *dev, int index)
+static void menu_reflect(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
-	data->last_state_idx = index;
-	if (index >= 0)
-		data->needs_update = 1;
+	data->needs_update = 1;
 }
 
 /**
  * menu_update - attempts to guess what happened after entry
- * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+static void menu_update(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
 	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
-	struct cpuidle_state *target = &drv->states[last_idx];
+	struct cpuidle_state *target = &dev->states[last_idx];
 	unsigned int measured_us;
 	u64 new_factor;
 
@@ -386,11 +383,9 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
 /**
  * menu_enable_device - scans a CPU's states and does setup
- * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int menu_enable_device(struct cpuidle_driver *drv,
-				struct cpuidle_device *dev)
+static int menu_enable_device(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
 
diff --git a/trunk/drivers/cpuidle/sysfs.c b/trunk/drivers/cpuidle/sysfs.c
index 1e756e160dca..be7917ec40c9 100644
--- a/trunk/drivers/cpuidle/sysfs.c
+++ b/trunk/drivers/cpuidle/sysfs.c
@@ -216,8 +216,7 @@ static struct kobj_type ktype_cpuidle = {
 
 struct cpuidle_state_attr {
 	struct attribute attr;
-	ssize_t (*show)(struct cpuidle_state *, \
-					struct cpuidle_state_usage *, char *);
+	ssize_t (*show)(struct cpuidle_state *, char *);
 	ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
 };
 
@@ -225,22 +224,19 @@ struct cpuidle_state_attr {
 static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
 
 #define define_show_state_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, \
-			 struct cpuidle_state_usage *state_usage, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
 { \
 	return sprintf(buf, "%u\n", state->_name);\
 }
 
 #define define_show_state_ull_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, \
-			 struct cpuidle_state_usage *state_usage, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
 { \
-	return sprintf(buf, "%llu\n", state_usage->_name);\
+	return sprintf(buf, "%llu\n", state->_name);\
 }
 
 #define define_show_state_str_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, \
-			 struct cpuidle_state_usage *state_usage, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
 { \
 	if (state->_name[0] == '\0')\
 		return sprintf(buf, "\n");\
@@ -273,18 +269,16 @@ static struct attribute *cpuidle_state_default_attrs[] = {
 
 #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
 #define kobj_to_state(k) (kobj_to_state_obj(k)->state)
-#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
 #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
 static ssize_t cpuidle_state_show(struct kobject * kobj,
 	struct attribute * attr ,char * buf)
 {
 	int ret = -EIO;
 	struct cpuidle_state *state = kobj_to_state(kobj);
-	struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
 	struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
 
 	if (cattr->show)
-		ret = cattr->show(state, state_usage, buf);
+		ret = cattr->show(state, buf);
 
 	return ret;
 }
@@ -322,15 +316,13 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
 {
 	int i, ret = -ENOMEM;
 	struct cpuidle_state_kobj *kobj;
-	struct cpuidle_driver *drv = cpuidle_get_driver();
 
 	/* state statistics */
 	for (i = 0; i < device->state_count; i++) {
 		kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
 		if (!kobj)
 			goto error_state;
-		kobj->state = &drv->states[i];
-		kobj->state_usage = &device->states_usage[i];
+		kobj->state = &device->states[i];
 		init_completion(&kobj->kobj_unregister);
 
 		ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c
index 5be9d599ff6b..a46dddf61078 100644
--- a/trunk/drivers/idle/intel_idle.c
+++ b/trunk/drivers/idle/intel_idle.c
@@ -81,8 +81,7 @@ static unsigned int mwait_substates;
 static unsigned int lapic_timer_reliable_states = (1 << 1);	 /* Default to only C1 */
 
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev,
-			struct cpuidle_driver *drv, int index);
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
 
@@ -110,6 +109,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C1 */
 		.name = "C1-NHM",
 		.desc = "MWAIT 0x00",
+		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 3,
 		.target_residency = 6,
@@ -117,6 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C2 */
 		.name = "C3-NHM",
 		.desc = "MWAIT 0x10",
+		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 20,
 		.target_residency = 80,
@@ -124,6 +125,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C3 */
 		.name = "C6-NHM",
 		.desc = "MWAIT 0x20",
+		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 200,
 		.target_residency = 800,
@@ -135,6 +137,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C1 */
 		.name = "C1-SNB",
 		.desc = "MWAIT 0x00",
+		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
 		.target_residency = 1,
@@ -142,6 +145,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C2 */
 		.name = "C3-SNB",
 		.desc = "MWAIT 0x10",
+		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 80,
 		.target_residency = 211,
@@ -149,6 +153,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C3 */
 		.name = "C6-SNB",
 		.desc = "MWAIT 0x20",
+		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 104,
 		.target_residency = 345,
@@ -156,6 +161,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C4 */
 		.name = "C7-SNB",
 		.desc = "MWAIT 0x30",
+		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 109,
 		.target_residency = 345,
@@ -167,6 +173,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C1 */
 		.name = "C1-ATM",
 		.desc = "MWAIT 0x00",
+		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
 		.target_residency = 4,
@@ -174,6 +181,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C2 */
 		.name = "C2-ATM",
 		.desc = "MWAIT 0x10",
+		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 20,
 		.target_residency = 80,
@@ -182,6 +190,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C4 */
 		.name = "C4-ATM",
 		.desc = "MWAIT 0x30",
+		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 100,
 		.target_residency = 400,
@@ -190,55 +199,23 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
 	{ /* MWAIT C6 */
 		.name = "C6-ATM",
 		.desc = "MWAIT 0x52",
+		.driver_data = (void *) 0x52,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 140,
 		.target_residency = 560,
 		.enter = &intel_idle },
 };
 
-static int get_driver_data(int cstate)
-{
-	int driver_data;
-	switch (cstate) {
-
-	case 1:	/* MWAIT C1 */
-		driver_data = 0x00;
-		break;
-	case 2:	/* MWAIT C2 */
-		driver_data = 0x10;
-		break;
-	case 3:	/* MWAIT C3 */
-		driver_data = 0x20;
-		break;
-	case 4:	/* MWAIT C4 */
-		driver_data = 0x30;
-		break;
-	case 5:	/* MWAIT C5 */
-		driver_data = 0x40;
-		break;
-	case 6:	/* MWAIT C6 */
-		driver_data = 0x52;
-		break;
-	default:
-		driver_data = 0x00;
-	}
-	return driver_data;
-}
-
 /**
  * intel_idle
  * @dev: cpuidle_device
- * @drv: cpuidle driver
- * @index: index of cpuidle state
+ * @state: cpuidle state
  *
  */
-static int intel_idle(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 {
 	unsigned long ecx = 1; /* break on interrupt flag */
-	struct cpuidle_state *state = &drv->states[index];
-	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
-	unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
+	unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
 	unsigned int cstate;
 	ktime_t kt_before, kt_after;
 	s64 usec_delta;
@@ -279,10 +256,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 
-	/* Update cpuidle counters */
-	dev->last_residency = (int)usec_delta;
-
-	return index;
+	return usec_delta;
 }
 
 static void __setup_broadcast_timer(void *arg)
@@ -422,60 +396,6 @@ static void intel_idle_cpuidle_devices_uninit(void)
 	free_percpu(intel_idle_cpuidle_devices);
 	return;
 }
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
- */
-static int intel_idle_cpuidle_driver_init(void)
-{
-	int cstate;
-	struct cpuidle_driver *drv = &intel_idle_driver;
-
-	drv->state_count = 1;
-
-	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
-		int num_substates;
-
-		if (cstate > max_cstate) {
-			printk(PREFIX "max_cstate %d reached\n",
-				max_cstate);
-			break;
-		}
-
-		/* does the state exist in CPUID.MWAIT? */
-		num_substates = (mwait_substates >> ((cstate) * 4))
-					& MWAIT_SUBSTATE_MASK;
-		if (num_substates == 0)
-			continue;
-		/* is the state not enabled? */
-		if (cpuidle_state_table[cstate].enter == NULL) {
-			/* does the driver not know about the state? */
-			if (*cpuidle_state_table[cstate].name == '\0')
-				pr_debug(PREFIX "unaware of model 0x%x"
-					" MWAIT %d please"
-					" contact lenb@kernel.org",
-				boot_cpu_data.x86_model, cstate);
-			continue;
-		}
-
-		if ((cstate > 2) &&
-			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-			mark_tsc_unstable("TSC halts in idle"
-					" states deeper than C2");
-
-		drv->states[drv->state_count] =	/* structure copy */
-			cpuidle_state_table[cstate];
-
-		drv->state_count += 1;
-	}
-
-	if (auto_demotion_disable_flags)
-		smp_call_function(auto_demotion_disable, NULL, 1);
-
-	return 0;
-}
-
-
 /*
  * intel_idle_cpuidle_devices_init()
  * allocate, initialize, register cpuidle_devices
@@ -510,11 +430,22 @@ static int intel_idle_cpuidle_devices_init(void)
 				continue;
 			/* is the state not enabled? */
 			if (cpuidle_state_table[cstate].enter == NULL) {
+				/* does the driver not know about the state? */
+				if (*cpuidle_state_table[cstate].name == '\0')
+					pr_debug(PREFIX "unaware of model 0x%x"
+						" MWAIT %d please"
+						" contact lenb@kernel.org",
+					boot_cpu_data.x86_model, cstate);
 				continue;
 			}
 
-			dev->states_usage[dev->state_count].driver_data =
-				(void *)get_driver_data(cstate);
+			if ((cstate > 2) &&
+				!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+				mark_tsc_unstable("TSC halts in idle"
+						" states deeper than C2");
+
+			dev->states[dev->state_count] =	/* structure copy */
+				cpuidle_state_table[cstate];
 
 			dev->state_count += 1;
 		}
@@ -527,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
 			return -EIO;
 		}
 	}
+	if (auto_demotion_disable_flags)
+		smp_call_function(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
@@ -544,7 +477,6 @@ static int __init intel_idle_init(void)
 	if (retval)
 		return retval;
 
-	intel_idle_cpuidle_driver_init();
 	retval = cpuidle_register_driver(&intel_idle_driver);
 	if (retval) {
 		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
diff --git a/trunk/include/acpi/processor.h b/trunk/include/acpi/processor.h
index 610f6fb1bbc2..67055f180330 100644
--- a/trunk/include/acpi/processor.h
+++ b/trunk/include/acpi/processor.h
@@ -329,7 +329,6 @@ extern void acpi_processor_throttling_init(void);
 int acpi_processor_power_init(struct acpi_processor *pr,
 			      struct acpi_device *device);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
-int acpi_processor_hotplug(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device);
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
diff --git a/trunk/include/linux/cpuidle.h b/trunk/include/linux/cpuidle.h
index c90418822f40..b51629e15cfc 100644
--- a/trunk/include/linux/cpuidle.h
+++ b/trunk/include/linux/cpuidle.h
@@ -22,62 +22,57 @@
 #define CPUIDLE_DESC_LEN	32
 
 struct cpuidle_device;
-struct cpuidle_driver;
 
 
 /****************************
  * CPUIDLE DEVICE INTERFACE *
  ****************************/
 
-struct cpuidle_state_usage {
-	void		*driver_data;
-
-	unsigned long long	usage;
-	unsigned long long	time; /* in US */
-};
-
 struct cpuidle_state {
 	char		name[CPUIDLE_NAME_LEN];
 	char		desc[CPUIDLE_DESC_LEN];
+	void		*driver_data;
 
 	unsigned int	flags;
 	unsigned int	exit_latency; /* in US */
 	unsigned int	power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */
 
+	unsigned long long	usage;
+	unsigned long long	time; /* in US */
+
 	int (*enter)	(struct cpuidle_device *dev,
-			struct cpuidle_driver *drv,
-			int index);
+			struct cpuidle_state *state);
 };
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
+#define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)
 
 /**
  * cpuidle_get_statedata - retrieves private driver state data
- * @st_usage: the state usage statistics
+ * @state: the state
  */
-static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage)
+static inline void * cpuidle_get_statedata(struct cpuidle_state *state)
 {
-	return st_usage->driver_data;
+	return state->driver_data;
 }
 
 /**
  * cpuidle_set_statedata - stores private driver state data
- * @st_usage: the state usage statistics
+ * @state: the state
  * @data: the private data
  */
 static inline void
-cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data)
+cpuidle_set_statedata(struct cpuidle_state *state, void *data)
 {
-	st_usage->driver_data = data;
+	state->driver_data = data;
 }
 
 struct cpuidle_state_kobj {
 	struct cpuidle_state *state;
-	struct cpuidle_state_usage *state_usage;
 	struct completion kobj_unregister;
 	struct kobject kobj;
 };
@@ -85,17 +80,22 @@ struct cpuidle_state_kobj {
 struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;
+	unsigned int		power_specified:1;
 	unsigned int		cpu;
 
 	int			last_residency;
 	int			state_count;
-	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
+	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
 	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
+	struct cpuidle_state	*last_state;
 
 	struct list_head 	device_list;
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
 	void			*governor_data;
+	struct cpuidle_state	*safe_state;
+
+	int (*prepare)		(struct cpuidle_device *dev);
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -119,11 +119,6 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
 struct cpuidle_driver {
 	char			name[CPUIDLE_NAME_LEN];
 	struct module 		*owner;
-
-	unsigned int		power_specified:1;
-	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
-	int			state_count;
-	int			safe_state_index;
 };
 
 #ifdef CONFIG_CPU_IDLE
@@ -170,14 +165,11 @@ struct cpuidle_governor {
 	struct list_head 	governor_list;
 	unsigned int		rating;
 
-	int  (*enable)		(struct cpuidle_driver *drv,
-					struct cpuidle_device *dev);
-	void (*disable)		(struct cpuidle_driver *drv,
-					struct cpuidle_device *dev);
+	int  (*enable)		(struct cpuidle_device *dev);
+	void (*disable)		(struct cpuidle_device *dev);
 
-	int  (*select)		(struct cpuidle_driver *drv,
-					struct cpuidle_device *dev);
-	void (*reflect)		(struct cpuidle_device *dev, int index);
+	int  (*select)		(struct cpuidle_device *dev);
+	void (*reflect)		(struct cpuidle_device *dev);
 
 	struct module 		*owner;
 };
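
For reference, a minimal sketch (not part of the patch) of code written against the per-device interface that these hunks restore: enter() is handed the chosen cpuidle_state and returns the measured residency in microseconds, per-state private data lives behind cpuidle_get_statedata()/cpuidle_set_statedata(), and a governor walks dev->states[] directly, honoring CPUIDLE_FLAG_IGNORE. The function names my_enter() and pick_state() are illustrative only.

#include <linux/cpuidle.h>

/*
 * Hypothetical enter() callback: under the restored API it receives the
 * selected cpuidle_state and returns the time spent idle, in microseconds.
 */
static int my_enter(struct cpuidle_device *dev, struct cpuidle_state *state)
{
	/* per-state private data now lives in the state itself */
	unsigned long hint = (unsigned long)cpuidle_get_statedata(state);
	int idle_time_us = 0;

	/*
	 * ... enter the low-power state described by 'hint' and measure
	 * how long the CPU actually stayed idle ...
	 */

	return idle_time_us;
}

/*
 * Governor-style walk over the per-device state table, simplified from the
 * menu_select() loop above: candidate states come from dev->states[], and
 * CPUIDLE_FLAG_IGNORE skips a state for this idle period only.
 */
static struct cpuidle_state *pick_state(struct cpuidle_device *dev,
					unsigned int predicted_us,
					unsigned int latency_req)
{
	struct cpuidle_state *best = NULL;
	int i;

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->flags & CPUIDLE_FLAG_IGNORE)
			continue;
		if (s->target_residency > predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		best = s;	/* deepest state meeting the constraints */
	}
	return best;
}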