---
r: 147396
b: refs/heads/master
c: 7c90cc4
h: refs/heads/master
v: v3
Robert Richter authored and Ingo Molnar committed Apr 29, 2009
1 parent c874b35 commit 378a5cc
Showing 2 changed files with 25 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6f00cada07bb5da7f751929d3173494dcc5446cc
+refs/heads/master: 7c90cc45f89af4dd4617f97d452740ad95b800d5
52 changes: 24 additions & 28 deletions trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -44,7 +44,7 @@ struct x86_pmu {
 	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
-	void		(*enable)(int, u64);
+	void		(*enable)(struct hw_perf_counter *, int);
 	void		(*disable)(int, u64);
 	unsigned	eventsel;
 	unsigned	perfctr;
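
The hunk above is the heart of the change: the vendor enable hook in struct x86_pmu now receives the struct hw_perf_counter itself rather than a pre-extracted (idx, config) pair, so every implementation reads config and config_base from one place. A minimal stand-alone sketch of that function-pointer dispatch, with simplified stand-in types and printf in place of real MSR writes (these are not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

struct hw_perf_counter {
	uint64_t config;	/* event-selector bits */
	uint64_t config_base;	/* first event-select MSR for this vendor */
};

struct x86_pmu {
	void (*enable)(struct hw_perf_counter *, int);
};

static void intel_enable(struct hw_perf_counter *hwc, int idx)
{
	printf("intel: msr %#llx\n", (unsigned long long)(hwc->config_base + idx));
}

static void amd_enable(struct hw_perf_counter *hwc, int idx)
{
	printf("amd:   msr %#llx\n", (unsigned long long)(hwc->config_base + idx));
}

static struct x86_pmu intel_pmu = { .enable = intel_enable };
static struct x86_pmu amd_pmu   = { .enable = amd_enable };
static struct x86_pmu x86_pmu;	/* filled in once by vendor detection */

int main(void)
{
	struct hw_perf_counter hwc = { .config = 0xc0, .config_base = 0x186 };

	x86_pmu = intel_pmu;		/* stand-in for boot-time CPU detection */
	x86_pmu.enable(&hwc, 1);	/* indirect call, vendor-agnostic call site */

	x86_pmu = amd_pmu;
	x86_pmu.enable(&hwc, 1);
	return 0;
}

The real code picks the vendor structure once in the init path; after that, generic call sites such as x86_pmu_enable() stay vendor-agnostic.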
@@ -414,28 +414,15 @@ static inline void intel_pmu_ack_status(u64 ack)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
 }
 
-static void intel_pmu_enable_counter(int idx, u64 config)
+static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
-			config | ARCH_PERFMON_EVENTSEL0_ENABLE);
-}
-
-static void amd_pmu_enable_counter(int idx, u64 config)
-{
-	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-
-	if (cpuc->enabled)
-		config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-
-	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-}
+	int err;
 
-static void hw_perf_enable(int idx, u64 config)
-{
-	if (unlikely(!perf_counters_initialized))
-		return;
-
-	x86_pmu.enable(idx, config);
+	err = checking_wrmsrl(hwc->config_base + idx,
+			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static void intel_pmu_disable_counter(int idx, u64 config)
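
With the two vendor copies folded together, one helper now serves both Intel and AMD, since at this point they differ only in the base MSR recorded in hwc->config_base; the helper ORs in the architectural enable bit (bit 22 of the event-select register) and writes through checking_wrmsrl, the fault-checking variant of wrmsrl. A sketch of that arithmetic with the MSR write stubbed out (the constants are the architectural values; the stub is not the kernel's helper):

#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1ULL << 22)	/* EN bit */

struct hw_perf_counter {
	uint64_t config;
	uint64_t config_base;
};

/* stub: in the kernel this is the MSR write that reports a fault
 * instead of oopsing; here it only logs */
static int checking_wrmsrl(uint64_t msr, uint64_t val)
{
	printf("wrmsr %#llx <- %#llx\n",
	       (unsigned long long)msr, (unsigned long long)val);
	return 0;
}

static void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	/* base MSR plus counter index, with the enable bit forced on */
	checking_wrmsrl(hwc->config_base + idx,
			hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

int main(void)
{
	/* 0x186 is MSR_ARCH_PERFMON_EVENTSEL0; event 0xc0 counts
	 * instructions retired on Intel */
	struct hw_perf_counter hwc = { .config = 0xc0, .config_base = 0x186 };

	x86_pmu_enable_counter(&hwc, 1);	/* writes MSR 0x187 */
	return 0;
}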
@@ -522,8 +509,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 }
 
 static inline void
-__pmc_fixed_enable(struct perf_counter *counter,
-		   struct hw_perf_counter *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
@@ -548,14 +534,24 @@ __pmc_fixed_enable(struct perf_counter *counter,
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void
-__x86_pmu_enable(struct perf_counter *counter,
-		 struct hw_perf_counter *hwc, int idx)
+static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-		__pmc_fixed_enable(counter, hwc, idx);
+	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+		intel_pmu_enable_fixed(hwc, idx);
+		return;
+	}
+
+	x86_pmu_enable_counter(hwc, idx);
+}
+
+static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	if (cpuc->enabled)
+		x86_pmu_enable_counter(hwc, idx);
 	else
-		hw_perf_enable(idx, hwc->config);
+		amd_pmu_disable_counter(idx, hwc->config);
 }
 
 static int
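
The AMD variant keeps the cpuc->enabled test from the old code: these counters have no global-control MSR comparable to Intel's GLOBAL_CTRL, so whether the PMU is "on" for a CPU lives in a software flag, and an enable request arriving while it is clear must write the selector with the enable bit off. A simplified stand-alone sketch of that gating (the types, write helper, and values are stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define EVNTSEL_ENABLE	(1ULL << 22)

struct hw_perf_counter { uint64_t config; uint64_t config_base; };
struct cpu_hw_counters { int enabled; };

static struct cpu_hw_counters cpu_hw_counters;	/* per-CPU in the kernel */

static void write_evntsel(struct hw_perf_counter *hwc, int idx, uint64_t config)
{
	printf("msr %#llx <- %#llx\n",
	       (unsigned long long)(hwc->config_base + idx),
	       (unsigned long long)config);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &cpu_hw_counters;

	if (cpuc->enabled)	/* PMU logically on: set the EN bit */
		write_evntsel(hwc, idx, hwc->config | EVNTSEL_ENABLE);
	else			/* PMU logically off: leave EN clear */
		write_evntsel(hwc, idx, hwc->config);
}

int main(void)
{
	/* 0xc0010000 is MSR_K7_EVNTSEL0; event 0x76 is CPU cycles on AMD */
	struct hw_perf_counter hwc = { .config = 0x76, .config_base = 0xc0010000 };

	cpu_hw_counters.enabled = 0;
	amd_pmu_enable_counter(&hwc, 0);	/* written with EN clear */

	cpu_hw_counters.enabled = 1;
	amd_pmu_enable_counter(&hwc, 0);	/* written with EN set */
	return 0;
}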
@@ -632,7 +628,7 @@ static int x86_pmu_enable(struct perf_counter *counter)
 	set_bit(idx, cpuc->active);
 
 	x86_perf_counter_set_period(counter, hwc, idx);
-	__x86_pmu_enable(counter, hwc, idx);
+	x86_pmu.enable(hwc, idx);
 
 	return 0;
 }
@@ -728,7 +724,7 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
 	x86_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-		__x86_pmu_enable(counter, hwc, idx);
+		intel_pmu_enable_counter(hwc, idx);
 }
 
 /*
