From 3c30862806f07c70bc7703729487e23ce3f8090f Mon Sep 17 00:00:00 2001
From: Robert Richter
Date: Wed, 29 Apr 2009 12:47:09 +0200
Subject: [PATCH]

--- yaml ---
r: 147387
b: refs/heads/master
c: 55de0f2e57994b525324bf0d04d242d9358a2417
h: refs/heads/master
i:
  147385: 375f35cc7a9d40120e65294d8acbe55ea1731b49
  147383: af2f65649745f9f89620eddf4a3c90fd460aa529
v: v3
---
 [refs]                                   | 2 +-
 trunk/arch/x86/kernel/cpu/perf_counter.c | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/[refs] b/[refs]
index 0cde3758df78..1897a6e43ea4 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 26816c287e13eedc67bc4ed0cd40c138314b7c7d
+refs/heads/master: 55de0f2e57994b525324bf0d04d242d9358a2417
diff --git a/trunk/arch/x86/kernel/cpu/perf_counter.c b/trunk/arch/x86/kernel/cpu/perf_counter.c
index fa6541d781bc..5a52d73ccfa7 100644
--- a/trunk/arch/x86/kernel/cpu/perf_counter.c
+++ b/trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -725,7 +725,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
  */
-static void perf_save_and_restart(struct perf_counter *counter)
+static void intel_pmu_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
@@ -753,7 +753,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 	int ret = 0;
 
-	cpuc->throttle_ctrl = hw_perf_save_disable();
+	cpuc->throttle_ctrl = intel_pmu_save_disable_all();
 
 	status = intel_pmu_get_status(cpuc->throttle_ctrl);
 	if (!status)
@@ -770,7 +770,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		if (!counter)
 			continue;
 
-		perf_save_and_restart(counter);
+		intel_pmu_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
@@ -788,7 +788,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	 * Restore - do not reenable when global enable is off or throttled:
 	 */
 	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
-		hw_perf_restore(cpuc->throttle_ctrl);
+		intel_pmu_restore_all(cpuc->throttle_ctrl);
 
 	return ret;
 }
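
The rename gives Intel-only helpers an intel_pmu_ prefix so that generic
x86 perf code can later dispatch to vendor-specific implementations by
name or through an ops table. Below is a minimal standalone sketch of
that dispatch pattern; struct pmu_ops and the sample values here are
hypothetical illustrations of the naming split, not the kernel's actual
interface.

/*
 * Standalone sketch (hypothetical names, not kernel code): vendor-
 * prefixed helpers plugged into a dispatch table, mirroring the
 * intel_pmu_* naming this patch introduces.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct pmu_ops {
	u64  (*save_disable_all)(void);	/* disable counters, return old ctrl */
	void (*restore_all)(u64 ctrl);	/* re-enable counters from saved ctrl */
};

static u64 intel_pmu_save_disable_all(void)
{
	/* real code would touch the global ctrl MSR; we just pretend */
	printf("intel: counters disabled\n");
	return 0x3ULL;
}

static void intel_pmu_restore_all(u64 ctrl)
{
	printf("intel: ctrl restored to 0x%llx\n", ctrl);
}

static const struct pmu_ops intel_pmu = {
	.save_disable_all = intel_pmu_save_disable_all,
	.restore_all      = intel_pmu_restore_all,
};

int main(void)
{
	const struct pmu_ops *pmu = &intel_pmu;	/* picked once at init time */
	u64 ctrl = pmu->save_disable_all();

	/* ... handle the overflow interrupt here ... */

	pmu->restore_all(ctrl);
	return 0;
}

With this shape, adding another vendor means adding a second ops
instance (e.g. a hypothetical amd_pmu) rather than renaming shared
helpers again, which is the direction the intel_pmu_ prefix points at.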