From 5adf1335a93fe73e0a9d552a9fe3fd735afbb9b1 Mon Sep 17 00:00:00 2001 From: Deepthi Dharwar Date: Fri, 28 Oct 2011 16:20:09 +0530 Subject: [PATCH] --- yaml --- r: 275030 b: refs/heads/master c: e978aa7d7d57d04eb5f88a7507c4fb98577def77 h: refs/heads/master v: v3 --- [refs] | 2 +- .../ABI/stable/sysfs-acpi-pmprofile | 22 ------ trunk/arch/arm/mach-at91/cpuidle.c | 10 ++- trunk/arch/arm/mach-davinci/cpuidle.c | 9 ++- trunk/arch/arm/mach-exynos4/cpuidle.c | 7 +- trunk/arch/arm/mach-kirkwood/cpuidle.c | 12 ++- trunk/arch/arm/mach-omap2/cpuidle34xx.c | 67 ++++++++++------- trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c | 12 +-- trunk/drivers/acpi/acpica/hwregs.c | 11 ++- trunk/drivers/acpi/atomicio.c | 2 +- trunk/drivers/acpi/bus.c | 8 +- trunk/drivers/acpi/processor_idle.c | 75 ++++++++++++------- trunk/drivers/acpi/scan.c | 3 +- trunk/drivers/acpi/sysfs.c | 14 +--- trunk/drivers/cpuidle/cpuidle.c | 32 ++++---- trunk/drivers/cpuidle/governors/ladder.c | 13 ++++ trunk/drivers/cpuidle/governors/menu.c | 7 +- trunk/drivers/idle/intel_idle.c | 12 ++- trunk/drivers/pnp/pnpacpi/rsparser.c | 62 +++++++++++---- trunk/drivers/thermal/thermal_sys.c | 4 +- trunk/include/acpi/acpi_drivers.h | 2 +- trunk/include/acpi/actypes.h | 1 + trunk/include/linux/cpuidle.h | 7 +- 23 files changed, 231 insertions(+), 163 deletions(-) delete mode 100644 trunk/Documentation/ABI/stable/sysfs-acpi-pmprofile diff --git a/[refs] b/[refs] index 9e5f3968fad0..f38e3fd15247 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: e751b759e82629798c4a5e4a87eb3a30c0510154 +refs/heads/master: e978aa7d7d57d04eb5f88a7507c4fb98577def77 diff --git a/trunk/Documentation/ABI/stable/sysfs-acpi-pmprofile b/trunk/Documentation/ABI/stable/sysfs-acpi-pmprofile deleted file mode 100644 index 964c7a8afb26..000000000000 --- a/trunk/Documentation/ABI/stable/sysfs-acpi-pmprofile +++ /dev/null @@ -1,22 +0,0 @@ -What: /sys/firmware/acpi/pm_profile -Date: 03-Nov-2011 -KernelVersion: v3.2 -Contact: linux-acpi@vger.kernel.org -Description: The ACPI pm_profile sysfs interface exports the platform - power management (and performance) requirement expectations - as provided by BIOS. The integer value is directly passed as - retrieved from the FADT ACPI table. 
-Values: For possible values see ACPI specification: - 5.2.9 Fixed ACPI Description Table (FADT) - Field: Preferred_PM_Profile - - Currently these values are defined by spec: - 0 Unspecified - 1 Desktop - 2 Mobile - 3 Workstation - 4 Enterprise Server - 5 SOHO Server - 6 Appliance PC - 7 Performance Server - >7 Reserved diff --git a/trunk/arch/arm/mach-at91/cpuidle.c b/trunk/arch/arm/mach-at91/cpuidle.c index 1cfeac1483d6..4696a0d61e2e 100644 --- a/trunk/arch/arm/mach-at91/cpuidle.c +++ b/trunk/arch/arm/mach-at91/cpuidle.c @@ -33,7 +33,7 @@ static struct cpuidle_driver at91_idle_driver = { /* Actual code that puts the SoC in different idle states */ static int at91_enter_idle(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { struct timeval before, after; int idle_time; @@ -41,10 +41,10 @@ static int at91_enter_idle(struct cpuidle_device *dev, local_irq_disable(); do_gettimeofday(&before); - if (state == &dev->states[0]) + if (index == 0) /* Wait for interrupt state */ cpu_do_idle(); - else if (state == &dev->states[1]) { + else if (index == 1) { asm("b 1f; .align 5; 1:"); asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */ saved_lpr = sdram_selfrefresh_enable(); @@ -55,7 +55,9 @@ static int at91_enter_idle(struct cpuidle_device *dev, local_irq_enable(); idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); - return idle_time; + + dev->last_residency = idle_time; + return index; } /* Initialize CPU idle by registering the idle states */ diff --git a/trunk/arch/arm/mach-davinci/cpuidle.c b/trunk/arch/arm/mach-davinci/cpuidle.c index bd59f31b8a95..ca8582a95ad9 100644 --- a/trunk/arch/arm/mach-davinci/cpuidle.c +++ b/trunk/arch/arm/mach-davinci/cpuidle.c @@ -78,9 +78,9 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = { /* Actual code that puts the SoC in different idle states */ static int davinci_enter_idle(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { - struct davinci_ops *ops = cpuidle_get_statedata(state); + struct davinci_ops *ops = cpuidle_get_statedata(&dev->states[index]); struct timeval before, after; int idle_time; @@ -98,7 +98,10 @@ static int davinci_enter_idle(struct cpuidle_device *dev, local_irq_enable(); idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); - return idle_time; + + dev->last_residency = idle_time; + + return index; } static int __init davinci_cpuidle_probe(struct platform_device *pdev) diff --git a/trunk/arch/arm/mach-exynos4/cpuidle.c b/trunk/arch/arm/mach-exynos4/cpuidle.c index bf7e96f2793a..ea026e72b977 100644 --- a/trunk/arch/arm/mach-exynos4/cpuidle.c +++ b/trunk/arch/arm/mach-exynos4/cpuidle.c @@ -16,7 +16,7 @@ #include static int exynos4_enter_idle(struct cpuidle_device *dev, - struct cpuidle_state *state); + int index); static struct cpuidle_state exynos4_cpuidle_set[] = { [0] = { @@ -37,7 +37,7 @@ static struct cpuidle_driver exynos4_idle_driver = { }; static int exynos4_enter_idle(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { struct timeval before, after; int idle_time; @@ -52,7 +52,8 @@ static int exynos4_enter_idle(struct cpuidle_device *dev, idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); - return idle_time; + dev->last_residency = idle_time; + return index; } static int __init exynos4_init_cpuidle(void) diff --git a/trunk/arch/arm/mach-kirkwood/cpuidle.c b/trunk/arch/arm/mach-kirkwood/cpuidle.c index 
f68d33f1f396..358dd80b3a07 100644 --- a/trunk/arch/arm/mach-kirkwood/cpuidle.c +++ b/trunk/arch/arm/mach-kirkwood/cpuidle.c @@ -32,17 +32,17 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device); /* Actual code that puts the SoC in different idle states */ static int kirkwood_enter_idle(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { struct timeval before, after; int idle_time; local_irq_disable(); do_gettimeofday(&before); - if (state == &dev->states[0]) + if (index == 0) /* Wait for interrupt state */ cpu_do_idle(); - else if (state == &dev->states[1]) { + else if (index == 1) { /* * Following write will put DDR in self refresh. * Note that we have 256 cycles before DDR puts it @@ -57,7 +57,11 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev, local_irq_enable(); idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); - return idle_time; + + /* Update last residency */ + dev->last_residency = idle_time; + + return index; } /* Initialize CPU idle by registering the idle states */ diff --git a/trunk/arch/arm/mach-omap2/cpuidle34xx.c b/trunk/arch/arm/mach-omap2/cpuidle34xx.c index 4bf6e6e8b100..58425c75f1b8 100644 --- a/trunk/arch/arm/mach-omap2/cpuidle34xx.c +++ b/trunk/arch/arm/mach-omap2/cpuidle34xx.c @@ -88,17 +88,19 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm, /** * omap3_enter_idle - Programs OMAP3 to enter the specified state * @dev: cpuidle device - * @state: The target state to be programmed + * @index: the index of state to be entered * * Called from the CPUidle framework to program the device to the * specified target state selected by the governor. */ static int omap3_enter_idle(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { - struct omap3_idle_statedata *cx = cpuidle_get_statedata(state); + struct omap3_idle_statedata *cx = + cpuidle_get_statedata(&dev->states[index]); struct timespec ts_preidle, ts_postidle, ts_idle; u32 mpu_state = cx->mpu_state, core_state = cx->core_state; + int idle_time; /* Used to keep track of the total time in idle */ getnstimeofday(&ts_preidle); @@ -113,7 +115,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, goto return_sleep_time; /* Deny idle for C1 */ - if (state == &dev->states[0]) { + if (index == 0) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); } @@ -122,7 +124,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, omap_sram_idle(); /* Re-allow idle for C1 */ - if (state == &dev->states[0]) { + if (index == 0) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); } @@ -134,28 +136,35 @@ static int omap3_enter_idle(struct cpuidle_device *dev, local_irq_enable(); local_fiq_enable(); - return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC; + idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \ + USEC_PER_SEC; + + /* Update cpuidle counters */ + dev->last_residency = idle_time; + + return index; } /** * next_valid_state - Find next valid C-state * @dev: cpuidle device - * @state: Currently selected C-state + * @index: Index of currently selected c-state * - * If the current state is valid, it is returned back to the caller. - * Else, this function searches for a lower c-state which is still - * valid. + * If the state corresponding to index is valid, index is returned back + * to the caller. 
Else, this function searches for a lower c-state which is + * still valid (as defined in omap3_power_states[]) and returns its index. * * A state is valid if the 'valid' field is enabled and * if it satisfies the enable_off_mode condition. */ -static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, - struct cpuidle_state *curr) +static int next_valid_state(struct cpuidle_device *dev, + int index) { - struct cpuidle_state *next = NULL; + struct cpuidle_state *curr = &dev->states[index]; struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr); u32 mpu_deepest_state = PWRDM_POWER_RET; u32 core_deepest_state = PWRDM_POWER_RET; + int next_index = -1; if (enable_off_mode) { mpu_deepest_state = PWRDM_POWER_OFF; @@ -172,20 +181,20 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, if ((cx->valid) && (cx->mpu_state >= mpu_deepest_state) && (cx->core_state >= core_deepest_state)) { - return curr; + return index; } else { int idx = OMAP3_NUM_STATES - 1; /* Reach the current state starting at highest C-state */ for (; idx >= 0; idx--) { if (&dev->states[idx] == curr) { - next = &dev->states[idx]; + next_index = idx; break; } } /* Should never hit this condition */ - WARN_ON(next == NULL); + WARN_ON(next_index == -1); /* * Drop to next valid state. @@ -197,37 +206,39 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, if ((cx->valid) && (cx->mpu_state >= mpu_deepest_state) && (cx->core_state >= core_deepest_state)) { - next = &dev->states[idx]; + next_index = idx; break; } } /* * C1 is always valid. - * So, no need to check for 'next==NULL' outside this loop. + * So, no need to check for 'next_index == -1' outside + * this loop. */ } - return next; + return next_index; } /** * omap3_enter_idle_bm - Checks for any bus activity * @dev: cpuidle device - * @state: The target state to be programmed + * @index: array index of target state to be programmed * * This function checks for any pending activity and then programs * the device to the specified or a safer state. */ static int omap3_enter_idle_bm(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { - struct cpuidle_state *new_state; + struct cpuidle_state *state = &dev->states[index]; + int new_state_idx; u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state; struct omap3_idle_statedata *cx; int ret; if (!omap3_can_sleep()) { - new_state = dev->safe_state; + new_state_idx = dev->safe_state_index; goto select_state; } @@ -237,7 +248,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, */ cam_state = pwrdm_read_pwrst(cam_pd); if (cam_state == PWRDM_POWER_ON) { - new_state = dev->safe_state; + new_state_idx = dev->safe_state_index; goto select_state; } @@ -264,11 +275,10 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, if (per_next_state != per_saved_state) pwrdm_set_next_pwrst(per_pd, per_next_state); - new_state = next_valid_state(dev, state); + new_state_idx = next_valid_state(dev, index); select_state: - dev->last_state = new_state; - ret = omap3_enter_idle(dev, new_state); + ret = omap3_enter_idle(dev, new_state_idx); /* Restore original PER state if it was modified */ if (per_next_state != per_saved_state) @@ -339,11 +349,12 @@ int __init omap3_idle_init(void) cpuidle_register_driver(&omap3_idle_driver); dev = &per_cpu(omap3_idle_dev, smp_processor_id()); + dev->safe_state_index = -1; /* C1 . 
MPU WFI + Core active */ cx = _fill_cstate(dev, 0, "MPU ON + CORE ON"); (&dev->states[0])->enter = omap3_enter_idle; - dev->safe_state = &dev->states[0]; + dev->safe_state_index = 0; cx->valid = 1; /* C1 is always valid */ cx->mpu_state = PWRDM_POWER_ON; cx->core_state = PWRDM_POWER_ON; diff --git a/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c b/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c index e4469e7233cb..7be50d4c4268 100644 --- a/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/trunk/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -25,11 +25,11 @@ static unsigned long cpuidle_mode[] = { }; static int cpuidle_sleep_enter(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { unsigned long allowed_mode = arch_hwblk_sleep_mode(); ktime_t before, after; - int requested_state = state - &dev->states[0]; + int requested_state = index; int allowed_state; int k; @@ -46,11 +46,13 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev, */ k = min_t(int, allowed_state, requested_state); - dev->last_state = &dev->states[k]; before = ktime_get(); sh_mobile_call_standby(cpuidle_mode[k]); after = ktime_get(); - return ktime_to_ns(ktime_sub(after, before)) >> 10; + + dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10; + + return k; } static struct cpuidle_device cpuidle_dev; @@ -84,7 +86,7 @@ void sh_mobile_setup_cpuidle(void) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; - dev->safe_state = state; + dev->safe_state_index = i-1; if (sh_mobile_sleep_supported & SUSP_SH_SF) { state = &dev->states[i++]; diff --git a/trunk/drivers/acpi/acpica/hwregs.c b/trunk/drivers/acpi/acpica/hwregs.c index cc70f3fdcdd1..55accb7018bb 100644 --- a/trunk/drivers/acpi/acpica/hwregs.c +++ b/trunk/drivers/acpi/acpica/hwregs.c @@ -269,17 +269,16 @@ acpi_status acpi_hw_clear_acpi_status(void) status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, ACPI_BITMASK_ALL_FIXED_STATUS); - - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); - - if (ACPI_FAILURE(status)) - goto exit; + if (ACPI_FAILURE(status)) { + goto unlock_and_exit; + } /* Clear the GPE Bits in all GPE registers in all GPE blocks */ status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); -exit: + unlock_and_exit: + acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } diff --git a/trunk/drivers/acpi/atomicio.c b/trunk/drivers/acpi/atomicio.c index f151afe61aab..7489b89c300f 100644 --- a/trunk/drivers/acpi/atomicio.c +++ b/trunk/drivers/acpi/atomicio.c @@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, { struct acpi_iomap *map; - map = __acpi_find_iomap(paddr, size/8); + map = __acpi_find_iomap(paddr, size); if (map) return map->vaddr + (paddr - map->paddr); else diff --git a/trunk/drivers/acpi/bus.c b/trunk/drivers/acpi/bus.c index 9ecec98bc76e..437ddbf0c49a 100644 --- a/trunk/drivers/acpi/bus.c +++ b/trunk/drivers/acpi/bus.c @@ -911,7 +911,10 @@ void __init acpi_early_init(void) } #endif - status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE); + status = + acpi_enable_subsystem(~ + (ACPI_NO_HARDWARE_INIT | + ACPI_NO_ACPI_ENABLE)); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); goto error0; @@ -932,7 +935,8 @@ static int __init acpi_bus_init(void) acpi_os_initialize1(); - status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); + status = + acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to start the ACPI 
Interpreter\n"); diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c index 431ab11c8c1b..9cd08cecb347 100644 --- a/trunk/drivers/acpi/processor_idle.c +++ b/trunk/drivers/acpi/processor_idle.c @@ -741,22 +741,24 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) /** * acpi_idle_enter_c1 - enters an ACPI C1 state-type * @dev: the target CPU - * @state: the state data + * @index: index of target state * * This is equivalent to the HALT instruction. */ static int acpi_idle_enter_c1(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { ktime_t kt1, kt2; s64 idle_time; struct acpi_processor *pr; + struct cpuidle_state *state = &dev->states[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); pr = __this_cpu_read(processors); + dev->last_residency = 0; if (unlikely(!pr)) - return 0; + return -EINVAL; local_irq_disable(); @@ -764,7 +766,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, if (acpi_idle_suspend) { local_irq_enable(); cpu_relax(); - return 0; + return -EINVAL; } lapic_timer_state_broadcast(pr, cx, 1); @@ -773,37 +775,46 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, kt2 = ktime_get_real(); idle_time = ktime_to_us(ktime_sub(kt2, kt1)); + /* Update device last_residency*/ + dev->last_residency = (int)idle_time; + local_irq_enable(); cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); - return idle_time; + return index; } /** * acpi_idle_enter_simple - enters an ACPI state without BM handling * @dev: the target CPU - * @state: the state data + * @index: the index of suggested state */ static int acpi_idle_enter_simple(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { struct acpi_processor *pr; + struct cpuidle_state *state = &dev->states[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); + dev->last_residency = 0; if (unlikely(!pr)) - return 0; - - if (acpi_idle_suspend) - return(acpi_idle_enter_c1(dev, state)); + return -EINVAL; local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EINVAL; + } + + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -815,7 +826,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); - return 0; + return -EINVAL; } } @@ -837,6 +848,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); + /* Update device last_residency*/ + dev->last_residency = (int)idle_time; + /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); @@ -848,7 +862,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; - return idle_time; + return index; } static int c3_cpu_count; @@ -857,14 +871,15 @@ static DEFINE_SPINLOCK(c3_lock); /** * acpi_idle_enter_bm - enters C3 with proper BM handling * @dev: the target CPU - * @state: the state data + * @index: the index of suggested state * * If BM is detected, the deepest non-C3 idle state is entered instead. 
*/ static int acpi_idle_enter_bm(struct cpuidle_device *dev, - struct cpuidle_state *state) + int index) { struct acpi_processor *pr; + struct cpuidle_state *state = &dev->states[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; s64 idle_time_ns; @@ -872,22 +887,26 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, pr = __this_cpu_read(processors); + dev->last_residency = 0; if (unlikely(!pr)) - return 0; + return -EINVAL; - if (acpi_idle_suspend) - return(acpi_idle_enter_c1(dev, state)); + + if (acpi_idle_suspend) { + cpu_relax(); + return -EINVAL; + } if (!cx->bm_sts_skip && acpi_idle_bm_check()) { - if (dev->safe_state) { - dev->last_state = dev->safe_state; - return dev->safe_state->enter(dev, dev->safe_state); + if (dev->safe_state_index >= 0) { + return dev->states[dev->safe_state_index].enter(dev, + dev->safe_state_index); } else { local_irq_disable(); acpi_safe_halt(); local_irq_enable(); - return 0; + return -EINVAL; } } @@ -904,7 +923,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); - return 0; + return -EINVAL; } } @@ -954,6 +973,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); + /* Update device last_residency*/ + dev->last_residency = (int)idle_time; + /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); @@ -965,7 +987,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; - return idle_time; + return index; } struct cpuidle_driver acpi_idle_driver = { @@ -992,6 +1014,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) } dev->cpu = pr->id; + dev->safe_state_index = -1; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { dev->states[i].name[0] = '\0'; dev->states[i].desc[0] = '\0'; @@ -1027,13 +1050,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_c1; - dev->safe_state = state; + dev->safe_state_index = count; break; case ACPI_STATE_C2: state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_simple; - dev->safe_state = state; + dev->safe_state_index = count; break; case ACPI_STATE_C3: diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index 8ab80bafe3f1..449c556274c0 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -1062,12 +1062,13 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id) if (!id) return; - id->id = kstrdup(dev_id, GFP_KERNEL); + id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL); if (!id->id) { kfree(id); return; } + strcpy(id->id, dev_id); list_add_tail(&id->list, &device->pnp.ids); } diff --git a/trunk/drivers/acpi/sysfs.c b/trunk/drivers/acpi/sysfs.c index 9f66181c814e..c538d0ef10ff 100644 --- a/trunk/drivers/acpi/sysfs.c +++ b/trunk/drivers/acpi/sysfs.c @@ -706,23 +706,11 @@ static void __exit interrupt_stats_exit(void) return; } -static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); -} - -static const struct device_attribute pm_profile_attr = - __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); - int __init acpi_sysfs_init(void) { int result; result = acpi_tables_sysfs_init(); - if (result) - return result; - result = 
sysfs_create_file(acpi_kobj, &pm_profile_attr.attr); + return result; } diff --git a/trunk/drivers/cpuidle/cpuidle.c b/trunk/drivers/cpuidle/cpuidle.c index d4c542372886..88bd12104396 100644 --- a/trunk/drivers/cpuidle/cpuidle.c +++ b/trunk/drivers/cpuidle/cpuidle.c @@ -62,7 +62,7 @@ int cpuidle_idle_call(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); struct cpuidle_state *target_state; - int next_state; + int next_state, entered_state; if (off) return -ENODEV; @@ -102,26 +102,27 @@ int cpuidle_idle_call(void) target_state = &dev->states[next_state]; - /* enter the state and update stats */ - dev->last_state = target_state; - trace_power_start(POWER_CSTATE, next_state, dev->cpu); trace_cpu_idle(next_state, dev->cpu); - dev->last_residency = target_state->enter(dev, target_state); + entered_state = target_state->enter(dev, next_state); trace_power_end(dev->cpu); trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); - if (dev->last_state) - target_state = dev->last_state; - - target_state->time += (unsigned long long)dev->last_residency; - target_state->usage++; + if (entered_state >= 0) { + /* Update cpuidle counters */ + /* This can be moved to within driver enter routine + * but that results in multiple copies of same code. + */ + dev->states[entered_state].time += + (unsigned long long)dev->last_residency; + dev->states[entered_state].usage++; + } /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) - cpuidle_curr_governor->reflect(dev); + cpuidle_curr_governor->reflect(dev, entered_state); return 0; } @@ -172,11 +173,10 @@ void cpuidle_resume_and_unlock(void) EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); #ifdef CONFIG_ARCH_HAS_CPU_RELAX -static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) +static int poll_idle(struct cpuidle_device *dev, int index) { ktime_t t1, t2; s64 diff; - int ret; t1 = ktime_get(); local_irq_enable(); @@ -188,8 +188,9 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) if (diff > INT_MAX) diff = INT_MAX; - ret = (int) diff; - return ret; + dev->last_residency = (int) diff; + + return index; } static void poll_idle_init(struct cpuidle_device *dev) @@ -248,7 +249,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev) dev->states[i].time = 0; } dev->last_residency = 0; - dev->last_state = NULL; smp_wmb(); diff --git a/trunk/drivers/cpuidle/governors/ladder.c b/trunk/drivers/cpuidle/governors/ladder.c index 12c98900dcf8..6a686a76711f 100644 --- a/trunk/drivers/cpuidle/governors/ladder.c +++ b/trunk/drivers/cpuidle/governors/ladder.c @@ -153,11 +153,24 @@ static int ladder_enable_device(struct cpuidle_device *dev) return 0; } +/** + * ladder_reflect - update the correct last_state_idx + * @dev: the CPU + * @index: the index of actual state entered + */ +static void ladder_reflect(struct cpuidle_device *dev, int index) +{ + struct ladder_device *ldev = &__get_cpu_var(ladder_devices); + if (index > 0) + ldev->last_state_idx = index; +} + static struct cpuidle_governor ladder_governor = { .name = "ladder", .rating = 10, .enable = ladder_enable_device, .select = ladder_select_state, + .reflect = ladder_reflect, .owner = THIS_MODULE, }; diff --git a/trunk/drivers/cpuidle/governors/menu.c b/trunk/drivers/cpuidle/governors/menu.c index c47f3d09c1ee..e4b200c5b441 100644 --- a/trunk/drivers/cpuidle/governors/menu.c +++ b/trunk/drivers/cpuidle/governors/menu.c @@ -310,14 +310,17 @@ static int menu_select(struct cpuidle_device *dev) /** * menu_reflect - records that 
data structures need update * @dev: the CPU + * @index: the index of actual entered state * * NOTE: it's important to be fast here because this operation will add to * the overall exit latency. */ -static void menu_reflect(struct cpuidle_device *dev) +static void menu_reflect(struct cpuidle_device *dev, int index) { struct menu_device *data = &__get_cpu_var(menu_devices); - data->needs_update = 1; + data->last_state_idx = index; + if (index >= 0) + data->needs_update = 1; } /** diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c index a46dddf61078..a1c888d2216a 100644 --- a/trunk/drivers/idle/intel_idle.c +++ b/trunk/drivers/idle/intel_idle.c @@ -81,7 +81,7 @@ static unsigned int mwait_substates; static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; -static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); +static int intel_idle(struct cpuidle_device *dev, int index); static struct cpuidle_state *cpuidle_state_table; @@ -209,12 +209,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { /** * intel_idle * @dev: cpuidle_device - * @state: cpuidle state + * @index: index of cpuidle state * */ -static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) +static int intel_idle(struct cpuidle_device *dev, int index) { unsigned long ecx = 1; /* break on interrupt flag */ + struct cpuidle_state *state = &dev->states[index]; unsigned long eax = (unsigned long)cpuidle_get_statedata(state); unsigned int cstate; ktime_t kt_before, kt_after; @@ -256,7 +257,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); - return usec_delta; + /* Update cpuidle counters */ + dev->last_residency = (int)usec_delta; + + return index; } static void __setup_broadcast_timer(void *arg) diff --git a/trunk/drivers/pnp/pnpacpi/rsparser.c b/trunk/drivers/pnp/pnpacpi/rsparser.c index 5be4a392a3ae..bbf3edd85beb 100644 --- a/trunk/drivers/pnp/pnpacpi/rsparser.c +++ b/trunk/drivers/pnp/pnpacpi/rsparser.c @@ -509,12 +509,15 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev, struct acpi_resource_dma *p) { int i; - unsigned char map = 0, flags; + unsigned char map = 0, flags = 0; + + if (p->channel_count == 0) + flags |= IORESOURCE_DISABLED; for (i = 0; i < p->channel_count; i++) map |= 1 << p->channels[i]; - flags = dma_flags(dev, p->type, p->bus_master, p->transfer); + flags |= dma_flags(dev, p->type, p->bus_master, p->transfer); pnp_register_dma_resource(dev, option_flags, map, flags); } @@ -524,14 +527,17 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, { int i; pnp_irq_mask_t map; - unsigned char flags; + unsigned char flags = 0; + + if (p->interrupt_count == 0) + flags |= IORESOURCE_DISABLED; bitmap_zero(map.bits, PNP_IRQ_NR); for (i = 0; i < p->interrupt_count; i++) if (p->interrupts[i]) __set_bit(p->interrupts[i], map.bits); - flags = irq_flags(p->triggering, p->polarity, p->sharable); + flags |= irq_flags(p->triggering, p->polarity, p->sharable); pnp_register_irq_resource(dev, option_flags, &map, flags); } @@ -541,7 +547,10 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, { int i; pnp_irq_mask_t map; - unsigned char flags; + unsigned char flags = 0; + + if (p->interrupt_count == 0) + flags |= IORESOURCE_DISABLED; bitmap_zero(map.bits, 
PNP_IRQ_NR); for (i = 0; i < p->interrupt_count; i++) { @@ -555,7 +564,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, } } - flags = irq_flags(p->triggering, p->polarity, p->sharable); + flags |= irq_flags(p->triggering, p->polarity, p->sharable); pnp_register_irq_resource(dev, option_flags, &map, flags); } @@ -565,8 +574,11 @@ static __init void pnpacpi_parse_port_option(struct pnp_dev *dev, { unsigned char flags = 0; + if (io->address_length == 0) + flags |= IORESOURCE_DISABLED; + if (io->io_decode == ACPI_DECODE_16) - flags = IORESOURCE_IO_16BIT_ADDR; + flags |= IORESOURCE_IO_16BIT_ADDR; pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, io->alignment, io->address_length, flags); } @@ -575,8 +587,13 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, unsigned int option_flags, struct acpi_resource_fixed_io *io) { + unsigned char flags = 0; + + if (io->address_length == 0) + flags |= IORESOURCE_DISABLED; + pnp_register_port_resource(dev, option_flags, io->address, io->address, - 0, io->address_length, IORESOURCE_IO_FIXED); + 0, io->address_length, flags | IORESOURCE_IO_FIXED); } static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, @@ -585,8 +602,11 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, { unsigned char flags = 0; + if (p->address_length == 0) + flags |= IORESOURCE_DISABLED; + if (p->write_protect == ACPI_READ_WRITE_MEMORY) - flags = IORESOURCE_MEM_WRITEABLE; + flags |= IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, p->alignment, p->address_length, flags); } @@ -597,8 +617,11 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev, { unsigned char flags = 0; + if (p->address_length == 0) + flags |= IORESOURCE_DISABLED; + if (p->write_protect == ACPI_READ_WRITE_MEMORY) - flags = IORESOURCE_MEM_WRITEABLE; + flags |= IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, p->alignment, p->address_length, flags); } @@ -609,8 +632,11 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev, { unsigned char flags = 0; + if (p->address_length == 0) + flags |= IORESOURCE_DISABLED; + if (p->write_protect == ACPI_READ_WRITE_MEMORY) - flags = IORESOURCE_MEM_WRITEABLE; + flags |= IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->address, p->address, 0, p->address_length, flags); } @@ -630,16 +656,19 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, return; } + if (p->address_length == 0) + flags |= IORESOURCE_DISABLED; + if (p->resource_type == ACPI_MEMORY_RANGE) { if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) - flags = IORESOURCE_MEM_WRITEABLE; + flags |= IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, flags); } else if (p->resource_type == ACPI_IO_RANGE) pnp_register_port_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, - IORESOURCE_IO_FIXED); + flags | IORESOURCE_IO_FIXED); } static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, @@ -649,16 +678,19 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, struct acpi_resource_extended_address64 *p = &r->data.ext_address64; unsigned char flags = 0; + if (p->address_length == 0) + flags |= IORESOURCE_DISABLED; + if (p->resource_type == ACPI_MEMORY_RANGE) { if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) - flags = 
IORESOURCE_MEM_WRITEABLE; + flags |= IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, flags); } else if (p->resource_type == ACPI_IO_RANGE) pnp_register_port_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, - IORESOURCE_IO_FIXED); + flags | IORESOURCE_IO_FIXED); } struct acpipnp_parse_option_s { diff --git a/trunk/drivers/thermal/thermal_sys.c b/trunk/drivers/thermal/thermal_sys.c index dd9a5743fa99..708f8e92771a 100644 --- a/trunk/drivers/thermal/thermal_sys.c +++ b/trunk/drivers/thermal/thermal_sys.c @@ -678,10 +678,10 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, return; if (delay > 1000) - queue_delayed_work(system_freezable_wq, &(tz->poll_queue), + schedule_delayed_work(&(tz->poll_queue), round_jiffies(msecs_to_jiffies(delay))); else - queue_delayed_work(system_freezable_wq, &(tz->poll_queue), + schedule_delayed_work(&(tz->poll_queue), msecs_to_jiffies(delay)); } diff --git a/trunk/include/acpi/acpi_drivers.h b/trunk/include/acpi/acpi_drivers.h index bb145e4b935e..e49c36d38d7e 100644 --- a/trunk/include/acpi/acpi_drivers.h +++ b/trunk/include/acpi/acpi_drivers.h @@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb) { } static inline int register_hotplug_dock_device(acpi_handle handle, - const struct acpi_dock_ops *ops, + struct acpi_dock_ops *ops, void *context) { return -ENODEV; diff --git a/trunk/include/acpi/actypes.h b/trunk/include/acpi/actypes.h index ed73f6705c86..b67231bef632 100644 --- a/trunk/include/acpi/actypes.h +++ b/trunk/include/acpi/actypes.h @@ -470,6 +470,7 @@ typedef u64 acpi_integer; */ #define ACPI_FULL_INITIALIZATION 0x00 #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 +#define ACPI_NO_HARDWARE_INIT 0x02 #define ACPI_NO_EVENT_INIT 0x04 #define ACPI_NO_HANDLER_INIT 0x08 #define ACPI_NO_ACPI_ENABLE 0x10 diff --git a/trunk/include/linux/cpuidle.h b/trunk/include/linux/cpuidle.h index b51629e15cfc..8da811bcdbdb 100644 --- a/trunk/include/linux/cpuidle.h +++ b/trunk/include/linux/cpuidle.h @@ -42,7 +42,7 @@ struct cpuidle_state { unsigned long long time; /* in US */ int (*enter) (struct cpuidle_device *dev, - struct cpuidle_state *state); + int index); }; /* Idle State Flags */ @@ -87,13 +87,12 @@ struct cpuidle_device { int state_count; struct cpuidle_state states[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; - struct cpuidle_state *last_state; struct list_head device_list; struct kobject kobj; struct completion kobj_unregister; void *governor_data; - struct cpuidle_state *safe_state; + int safe_state_index; int (*prepare) (struct cpuidle_device *dev); }; @@ -169,7 +168,7 @@ struct cpuidle_governor { void (*disable) (struct cpuidle_device *dev); int (*select) (struct cpuidle_device *dev); - void (*reflect) (struct cpuidle_device *dev); + void (*reflect) (struct cpuidle_device *dev, int index); struct module *owner; };
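
The pattern repeated across every driver above is the same: the ->enter() callback now takes an index into dev->states[] rather than a state pointer, stores the measured residency in dev->last_residency (in microseconds), and returns the index of the state it actually entered, or a negative errno when it bails out, as the ACPI paths do. A minimal sketch of a converted driver callback under this 3.2-era API follows; the "foo" names and foo_enter_deep_sleep() are hypothetical placeholders, and cpu_do_idle() is the ARM wait-for-interrupt helper used by the at91 and kirkwood drivers in this series.

#include <linux/cpuidle.h>
#include <linux/irqflags.h>
#include <linux/ktime.h>
#include <asm/proc-fns.h>	/* cpu_do_idle(), ARM builds */

static int foo_enter_idle(struct cpuidle_device *dev, int index)
{
	ktime_t before, after;
	s64 idle_time;

	local_irq_disable();
	before = ktime_get();

	if (index == 0)
		cpu_do_idle();			/* C1: plain WFI */
	else
		foo_enter_deep_sleep();		/* hypothetical deeper state */

	after = ktime_get();
	local_irq_enable();

	/* Residency is reported through the device, not the return value. */
	idle_time = ktime_to_us(ktime_sub(after, before));
	dev->last_residency = (int)idle_time;

	/* Return the index of the state that was actually entered. */
	return index;
}

Governors see the same index: cpuidle_idle_call() passes the value returned by ->enter() to the governor's reflect(dev, entered_state) hook, and the per-state usage/time counters are only bumped when that index is non-negative, which is why the ACPI failure paths return -EINVAL instead of 0.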