perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all()

MSR reads and writes are expensive. This patch adds checks to avoid
them where possible: a counter's event-select MSR is only rewritten
when its enable bit actually has to change.

[ Impact: micro-optimization on AMD CPUs ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Robert Richter authored and Ingo Molnar committed Apr 29, 2009
1 parent 4138960 commit 4295ee6
Showing 1 changed file with 14 additions and 10 deletions.
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,11 +334,13 @@ static u64 pmc_amd_save_disable_all(void)
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		u64 val;
 
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+			continue;
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 
 	return enabled;
@@ -372,13 +374,15 @@ static void pmc_amd_restore_all(u64 ctrl)
 		return;
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
-		if (test_bit(idx, cpuc->active_mask)) {
-			u64 val;
+		u64 val;
 
-			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			continue;
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 }

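For illustration, a minimal userspace sketch of the pattern both hunks
apply: read the control value first, then skip the expensive write when
the enable bit already has the desired state. Everything in it
(rdmsr_sim(), wrmsr_sim(), NR_COUNTERS, the access counters) is invented
for the sketch; only the bit position matches the kernel's
ARCH_PERFMON_EVENTSEL0_ENABLE.

	#include <stdio.h>
	#include <stdint.h>

	#define EVNTSEL_ENABLE	(1ULL << 22)	/* bit 22, as in ARCH_PERFMON_EVENTSEL0_ENABLE */
	#define NR_COUNTERS	4

	static uint64_t msr[NR_COUNTERS];	/* simulated event-select MSRs */
	static int rd, wr;			/* count simulated MSR accesses */

	static uint64_t rdmsr_sim(int idx) { rd++; return msr[idx]; }
	static void wrmsr_sim(int idx, uint64_t val) { wr++; msr[idx] = val; }

	static void disable_all(void)
	{
		int idx;

		for (idx = 0; idx < NR_COUNTERS; idx++) {
			uint64_t val = rdmsr_sim(idx);

			if (!(val & EVNTSEL_ENABLE))
				continue;	/* already disabled: skip the write */
			wrmsr_sim(idx, val & ~EVNTSEL_ENABLE);
		}
	}

	int main(void)
	{
		msr[1] = EVNTSEL_ENABLE;	/* pretend only counter 1 is enabled */
		disable_all();
		printf("reads=%d writes=%d\n", rd, wr);	/* prints: reads=4 writes=1 */
		return 0;
	}

The same read-before-write idea is what the restore path uses as well:
counters whose enable bit is already set are left untouched, so only
the counters that actually changed state cost a wrmsrl().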