Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 188251
b: refs/heads/master
c: aff3d91
h: refs/heads/master
i:
  188249: c9ab2f3
  188247: 3e8b24e
v: v3
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Mar 10, 2010
1 parent 8781026 commit ae4908c
Show file tree
Hide file tree
Showing 4 changed files with 39 additions and 34 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
-refs/heads/master: cc2ad4ba8792b9d4ff893ae3b845d2c5a6206fc9
+refs/heads/master: aff3d91a913c9ae0c2f56b65b27cbd00c7d27ee3
31 changes: 15 additions & 16 deletions trunk/arch/x86/kernel/cpu/perf_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -133,8 +133,8 @@ struct x86_pmu {
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(void);
-void (*enable)(struct hw_perf_event *, int);
-void (*disable)(struct hw_perf_event *, int);
+void (*enable)(struct perf_event *);
+void (*disable)(struct perf_event *);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
Expand Down Expand Up @@ -845,7 +845,7 @@ void hw_perf_enable(void)
set_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = event;

-x86_pmu.enable(hwc, hwc->idx);
+x86_pmu.enable(event);
perf_event_update_userpage(event);
}
cpuc->n_added = 0;
Expand All @@ -858,15 +858,16 @@ void hw_perf_enable(void)
x86_pmu.enable_all();
}

-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
-(void)checking_wrmsrl(hwc->config_base + idx,
+(void)checking_wrmsrl(hwc->config_base + hwc->idx,
hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

-static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static inline void x86_pmu_disable_event(struct perf_event *event)
{
-(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
+struct hw_perf_event *hwc = &event->hw;
+(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
Expand Down Expand Up @@ -927,11 +928,11 @@ x86_perf_event_set_period(struct perf_event *event)
return ret;
}

-static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void x86_pmu_enable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
-__x86_pmu_enable_event(hwc, idx);
+__x86_pmu_enable_event(&event->hw);
}

/*
Expand Down Expand Up @@ -974,13 +975,11 @@ static int x86_pmu_enable(struct perf_event *event)

static int x86_pmu_start(struct perf_event *event)
{
-struct hw_perf_event *hwc = &event->hw;
-
-if (hwc->idx == -1)
+if (event->hw.idx == -1)
return -EAGAIN;

x86_perf_event_set_period(event);
-x86_pmu.enable(hwc, hwc->idx);
+x86_pmu.enable(event);

return 0;
}
Expand All @@ -994,7 +993,7 @@ static void x86_pmu_unthrottle(struct perf_event *event)
cpuc->events[hwc->idx] != event))
return;

-x86_pmu.enable(hwc, hwc->idx);
+x86_pmu.enable(event);
}

void perf_event_print_debug(void)
Expand Down Expand Up @@ -1059,7 +1058,7 @@ static void x86_pmu_stop(struct perf_event *event)
* could reenable again:
*/
clear_bit(idx, cpuc->active_mask);
-x86_pmu.disable(hwc, idx);
+x86_pmu.disable(event);

/*
* Drain the remaining delta count out of a event
Expand Down Expand Up @@ -1127,7 +1126,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
continue;

if (perf_event_overflow(event, 1, &data, regs))
-x86_pmu.disable(hwc, idx);
+x86_pmu.disable(event);
}

if (handled)
Expand Down
30 changes: 17 additions & 13 deletions trunk/arch/x86/kernel/cpu/perf_event_intel.c
Original file line number Diff line number Diff line change
Expand Up @@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
}

static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
-int idx = __idx - X86_PMC_IDX_FIXED;
+int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;

mask = 0xfULL << (idx * 4);
Expand Down Expand Up @@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
}

static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+intel_pmu_disable_event(struct perf_event *event)
{
-if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+struct hw_perf_event *hwc = &event->hw;
+
+if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
return;
}

if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-intel_pmu_disable_fixed(hwc, idx);
+intel_pmu_disable_fixed(hwc);
return;
}

-x86_pmu_disable_event(hwc, idx);
+x86_pmu_disable_event(event);
}

static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
-int idx = __idx - X86_PMC_IDX_FIXED;
+int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
int err;

Expand Down Expand Up @@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
{
-if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+struct hw_perf_event *hwc = &event->hw;
+
+if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
if (!__get_cpu_var(cpu_hw_events).enabled)
return;

Expand All @@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
}

if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-intel_pmu_enable_fixed(hwc, idx);
+intel_pmu_enable_fixed(hwc);
return;
}

-__x86_pmu_enable_event(hwc, idx);
+__x86_pmu_enable_event(hwc);
}

/*
Expand Down Expand Up @@ -771,7 +775,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
data.period = event->hw.last_period;

if (perf_event_overflow(event, 1, &data, regs))
-intel_pmu_disable_event(&event->hw, bit);
+intel_pmu_disable_event(event);
}

intel_pmu_ack_status(ack);
Expand Down
10 changes: 6 additions & 4 deletions trunk/arch/x86/kernel/cpu/perf_event_p6.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void)
}

static inline void
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+p6_pmu_disable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+struct hw_perf_event *hwc = &event->hw;
u64 val = P6_NOP_EVENT;

if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;

-(void)checking_wrmsrl(hwc->config_base + idx, val);
+(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void p6_pmu_enable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+struct hw_perf_event *hwc = &event->hw;
u64 val;

val = hwc->config;
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;

-(void)checking_wrmsrl(hwc->config_base + idx, val);
+(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

static __initconst struct x86_pmu p6_pmu = {
Expand Down

0 comments on commit ae4908c

Please sign in to comment.