From 64317df48bc1062c47a9bd16c489171e4061dc2a Mon Sep 17 00:00:00 2001
From: Venkatesh Pallipadi
Date: Mon, 6 Apr 2009 18:51:29 -0700
Subject: [PATCH]

--- yaml ---
r: 142831
b: refs/heads/master
c: db954b5898dd3ef3ef93f4144158ea8f97deb058
h: refs/heads/master
i:
  142829: f852ea8ac38fa7af99c890728a7cac0917a93316
  142827: bf2d851f06c4498b7b6eb1746f95c241c828c2a1
  142823: 00c2f4fe38c1363c5ed919b1804ed55c449aaf16
  142815: 61d1d0d25de5fb9164e09d8f38a394ddd9575d58
v: v3
---
 [refs]                                     |  2 +-
 trunk/arch/x86/include/asm/cpufeature.h    |  1 +
 trunk/arch/x86/kernel/apic/apic.c          |  6 ++
 .../x86/kernel/cpu/addon_cpuid_features.c  |  1 +
 .../x86/kernel/cpu/cpufreq/acpi-cpufreq.c  | 55 +++++++++----------
 trunk/drivers/acpi/processor_idle.c        |  3 +
 6 files changed, 38 insertions(+), 30 deletions(-)

diff --git a/[refs] b/[refs]
index 8b57bdcc9161..ccbe6e49abee 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 18b2646fe3babeb40b34a0c1751e0bf5adfdc64c
+refs/heads/master: db954b5898dd3ef3ef93f4144158ea8f97deb058
diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h
index 0beba0d1468d..bb83b1c397aa 100644
--- a/trunk/arch/x86/include/asm/cpufeature.h
+++ b/trunk/arch/x86/include/asm/cpufeature.h
@@ -154,6 +154,7 @@
  * CPUID levels like 0x6, 0xA etc
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
 
 /* Virtualization flags: Linux defined */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
diff --git a/trunk/arch/x86/kernel/apic/apic.c b/trunk/arch/x86/kernel/apic/apic.c
index 098ec84b8c00..f2870920f246 100644
--- a/trunk/arch/x86/kernel/apic/apic.c
+++ b/trunk/arch/x86/kernel/apic/apic.c
@@ -431,6 +431,12 @@ static void __cpuinit setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
+	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
+		/* Make LAPIC timer preferrable over percpu HPET */
+		lapic_clockevent.rating = 150;
+	}
+
 	memcpy(levt, &lapic_clockevent, sizeof(*levt));
 	levt->cpumask = cpumask_of(smp_processor_id());
diff --git a/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c b/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c
index 8220ae69849d..c965e5212714 100644
--- a/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/trunk/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+		{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 9d3af380c6bd..19f6b9d27e83 100644
--- a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,6 @@ struct acpi_cpufreq_data {
 	unsigned int max_freq;
 	unsigned int resume;
 	unsigned int cpu_feature;
-	u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
@@ -242,23 +241,26 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_pair {
+struct perf_cur {
 	union {
 		struct {
 			u32 lo;
 			u32 hi;
 		} split;
 		u64 whole;
-	} aperf, mperf;
+	} aperf_cur, mperf_cur;
 };
 
 static long read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_pair *cur = _cur;
+	struct perf_cur *cur = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
+	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+
+	wrmsr(MSR_IA32_APERF, 0, 0);
+	wrmsr(MSR_IA32_MPERF, 0, 0);
 
 	return 0;
 }
@@ -279,57 +281,52 @@ static long read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_pair readin, cur;
+	struct perf_cur cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &readin))
+	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
 		return 0;
 
-	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(drv_data, cpu)->saved_aperf;
-	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(drv_data, cpu)->saved_mperf;
-	per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
-	per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
-
 #ifdef __i386__
 	/*
 	 * We dont want to do 64 bit divide with 32 bit kernel
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
+	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
+		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
 		shift_count = fls(h);
 
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
 		int shift_count = 7;
-		cur.aperf.split.lo >>= shift_count;
-		cur.mperf.split.lo >>= shift_count;
+		cur.aperf_cur.split.lo >>= shift_count;
+		cur.mperf_cur.split.lo >>= shift_count;
 	}
 
-	if (cur.aperf.split.lo && cur.mperf.split.lo)
-		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
+	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
+		perf_percent = (cur.aperf_cur.split.lo * 100) /
+				cur.mperf_cur.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
 		int shift_count = 7;
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (cur.aperf.whole && cur.mperf.whole)
-		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
+	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
+		perf_percent = (cur.aperf_cur.whole * 100) /
+				cur.mperf_cur.whole;
 	else
 		perf_percent = 0;
diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c
index 4e6e758bd397..6fe121434ffb 100644
--- a/trunk/drivers/acpi/processor_idle.c
+++ b/trunk/drivers/acpi/processor_idle.c
@@ -145,6 +145,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 	struct acpi_processor_power *pwr = &pr->power;
 	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
 
+	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
+		return;
+
 	/*
 	 * Check, if one of the previous states already marked the lapic
 	 * unstable