
Commit

---
yaml
---
r: 288721
b: refs/heads/master
c: 60ce0fb
h: refs/heads/master
i:
  288719: c15306a
v: v3
Stephane Eranian authored and Ingo Molnar committed Mar 5, 2012
1 parent 3d93e1b commit c2a1684
Showing 5 changed files with 126 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 88c9a65e13f393fd60d8b9e9c659a34f9e39967d
refs/heads/master: 60ce0fbd072695866cb27b729690ab59dce705a5
2 changes: 2 additions & 0 deletions trunk/arch/x86/kernel/cpu/perf_event.h
@@ -541,6 +541,8 @@ void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

int p4_pmu_init(void);

int p6_pmu_init(void);
35 changes: 35 additions & 0 deletions trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -727,6 +727,19 @@ static __initconst const u64 atom_hw_cache_event_ids
},
};

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
/* user explicitly requested branch sampling */
if (has_branch_stack(event))
return true;

/* implicit branch sampling to correct PEBS skid */
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
return true;

return false;
}

static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -881,6 +894,13 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

/*
* must be disabled before any actual event
* because any event may be combined with LBR
*/
if (intel_pmu_needs_lbr_smpl(event))
intel_pmu_lbr_disable(event);

if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
@@ -935,6 +955,12 @@ static void intel_pmu_enable_event(struct perf_event *event)
intel_pmu_enable_bts(hwc->config);
return;
}
/*
* must be enabled before any actual event
* because any event may be combined with LBR
*/
if (intel_pmu_needs_lbr_smpl(event))
intel_pmu_lbr_enable(event);

if (event->attr.exclude_host)
cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
@@ -1057,6 +1083,9 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)

data.period = event->hw.last_period;

if (has_branch_stack(event))
data.br_stack = &cpuc->lbr_stack;

if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
@@ -1305,6 +1334,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
event->hw.config = alt_config;
}

if (intel_pmu_needs_lbr_smpl(event)) {
ret = intel_pmu_setup_lbr_filter(event);
if (ret)
return ret;
}

if (event->attr.type != PERF_TYPE_RAW)
return 0;

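For illustration only, and not part of this commit: the new intel_pmu_hw_config() path above is reached when user space opens an event that explicitly asks for branch sampling. A minimal user-space sketch follows, assuming a kernel exposing the matching PERF_SAMPLE_BRANCH_* ABI (the perf_event_attr.branch_sample_type field introduced by this patch series); error handling is trimmed. If the PMU has no LBR, or the requested filter cannot be honored, perf_event_open() fails with EOPNOTSUPP, mirroring the checks in intel_pmu_setup_lbr_filter().

/* sketch: request branch-stack sampling so has_branch_stack(event) is true */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	/* explicit request: intel_pmu_needs_lbr_smpl() returns true */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
				  PERF_SAMPLE_BRANCH_USER;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");	/* e.g. EOPNOTSUPP: no LBR */
		return 1;
	}
	close(fd);
	return 0;
}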
10 changes: 4 additions & 6 deletions trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -439,9 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event)
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

cpuc->pebs_enabled |= 1ULL << hwc->idx;

if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
}

void intel_pmu_pebs_disable(struct perf_event *event)
@@ -454,9 +451,6 @@ void intel_pmu_pebs_disable(struct perf_event *event)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_disable(event);
}

void intel_pmu_pebs_enable_all(void)
@@ -572,6 +566,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* both formats and we don't use the other fields in this
* routine.
*/
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct pebs_record_core *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
@@ -602,6 +597,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
else
regs.flags &= ~PERF_EFLAGS_EXACT;

if (has_branch_stack(event))
data.br_stack = &cpuc->lbr_stack;

if (perf_event_overflow(event, &data, &regs))
x86_pmu_stop(event, 0);
}
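Also for illustration, not part of the diff: the hunks above remove the implicit LBR enable/disable from the PEBS code, since intel_pmu_enable_event()/intel_pmu_disable_event() now cover it through intel_pmu_needs_lbr_smpl(). The other trigger for that helper is a PEBS event with precise_ip > 1, where the LBR is used to correct the off-by-one skid even though the user never asked for a branch stack. A sketch of such an attr setup, reusing perf_event_open() from the previous example; the helper name is invented for the example.

#include <linux/perf_event.h>
#include <string.h>

/* sketch: no PERF_SAMPLE_BRANCH_STACK is requested, yet
 * intel_pmu_needs_lbr_smpl() is still true when pebs_trap is set,
 * so the LBR gets enabled implicitly to fix up the PEBS skid */
void setup_precise_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size          = sizeof(*attr);
	attr->type          = PERF_TYPE_HARDWARE;
	attr->config        = PERF_COUNT_HW_INSTRUCTIONS;
	attr->sample_period = 100000;
	attr->sample_type   = PERF_SAMPLE_IP;
	attr->precise_ip    = 2;	/* request 0 skid; needs the LBR fixup */
}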
86 changes: 84 additions & 2 deletions trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -56,6 +56,10 @@ enum {

#define LBR_FROM_FLAG_MISPRED (1ULL << 63)

#define for_each_branch_sample_type(x) \
for ((x) = PERF_SAMPLE_BRANCH_USER; \
(x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)

/*
* We only support LBR implementations that have FREEZE_LBRS_ON_PMI
* otherwise it becomes near impossible to get a reliable stack.
@@ -64,6 +68,10 @@ enum {
static void __intel_pmu_lbr_enable(void)
{
u64 debugctl;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

if (cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);

rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
@@ -119,7 +127,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
* Reset the LBR stack if we changed task context to
* avoid data leaks.
*/

if (event->ctx->task && cpuc->lbr_context != event->ctx) {
intel_pmu_lbr_reset();
cpuc->lbr_context = event->ctx;
@@ -138,8 +145,11 @@ void intel_pmu_lbr_disable(struct perf_event *event)
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);

if (cpuc->enabled && !cpuc->lbr_users)
if (cpuc->enabled && !cpuc->lbr_users) {
__intel_pmu_lbr_disable();
/* avoid stale pointer */
cpuc->lbr_context = NULL;
}
}

void intel_pmu_lbr_enable_all(void)
@@ -158,6 +168,9 @@ void intel_pmu_lbr_disable_all(void)
__intel_pmu_lbr_disable();
}

/*
* TOS = most recently recorded branch
*/
static inline u64 intel_pmu_lbr_tos(void)
{
u64 tos;
@@ -241,6 +254,75 @@ void intel_pmu_lbr_read(void)
intel_pmu_lbr_read_64(cpuc);
}

/*
* setup the HW LBR filter
* Used only when available, may not be enough to disambiguate
* all branches, may need the help of the SW filter
*/
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
struct hw_perf_event_extra *reg;
u64 br_type = event->attr.branch_sample_type;
u64 mask = 0, m;
u64 v;

for_each_branch_sample_type(m) {
if (!(br_type & m))
continue;

v = x86_pmu.lbr_sel_map[m];
if (v == LBR_NOT_SUPP)
return -EOPNOTSUPP;
mask |= v;

if (m == PERF_SAMPLE_BRANCH_ANY)
break;
}
reg = &event->hw.branch_reg;
reg->idx = EXTRA_REG_LBR;

/* LBR_SELECT operates in suppress mode so invert mask */
reg->config = ~mask & x86_pmu.lbr_sel_mask;

return 0;
}

/*
* all the bits supported on some flavor of x86 LBR
* we ignore BRANCH_HV because it is not supported
*/
#define PERF_SAMPLE_BRANCH_X86_ALL \
(PERF_SAMPLE_BRANCH_ANY |\
PERF_SAMPLE_BRANCH_USER |\
PERF_SAMPLE_BRANCH_KERNEL)

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;

/*
* no LBR on this PMU
*/
if (!x86_pmu.lbr_nr)
return -EOPNOTSUPP;

/*
* if no LBR HW filter, users can only
* capture all branches
*/
if (!x86_pmu.lbr_sel_map) {
if (br_type != PERF_SAMPLE_BRANCH_X86_ALL)
return -EOPNOTSUPP;
return 0;
}
/*
* we ignore branch priv levels we do not
* know about: BRANCH_HV
*/

return intel_pmu_setup_hw_lbr_filter(event);
}

/*
* Map interface branch filters onto LBR filters
*/
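A stand-alone illustration (not kernel code, with invented map values) of the OR-then-invert logic in intel_pmu_setup_hw_lbr_filter() above: each requested branch-type bit is translated through a per-model map and accumulated, and because MSR_LBR_SELECT operates in suppress mode, the accumulated mask is inverted before being programmed, so every branch type that was not requested ends up suppressed. All identifiers below (BR_USER, sel_map, build_lbr_select, NOT_SUPP, SEL_MASK) are placeholders for the kernel's PERF_SAMPLE_BRANCH_* bits, lbr_sel_map, LBR_NOT_SUPP and lbr_sel_mask, not real values.

#include <stdint.h>
#include <stdio.h>

/* placeholder request bits, standing in for PERF_SAMPLE_BRANCH_USER/KERNEL/ANY */
enum {
	BR_USER   = 1 << 0,
	BR_KERNEL = 1 << 1,
	BR_ANY    = 1 << 2,
	BR_MAX    = 1 << 3,
};

#define NOT_SUPP  0xffULL	/* placeholder for the LBR_NOT_SUPP sentinel */
#define SEL_MASK  0x3fULL	/* placeholder for x86_pmu.lbr_sel_mask      */

/* hypothetical per-model map (values are made up, not real LBR_SELECT bits) */
static const uint64_t sel_map[BR_MAX] = {
	[BR_USER]   = 0x02,
	[BR_KERNEL] = 0x01,
	[BR_ANY]    = 0x00,	/* ANY: nothing to filter on */
};

/* mirrors the shape of intel_pmu_setup_hw_lbr_filter(): OR the mapped bits
 * for every requested type, then invert, because set bits in LBR_SELECT
 * suppress the corresponding branches */
static int build_lbr_select(uint64_t br_type, uint64_t *config)
{
	uint64_t mask = 0, m, v;

	for (m = BR_USER; m < BR_MAX; m <<= 1) {
		if (!(br_type & m))
			continue;
		v = sel_map[m];
		if (v == NOT_SUPP)
			return -1;
		mask |= v;
		if (m == BR_ANY)
			break;
	}
	*config = ~mask & SEL_MASK;	/* suppress everything not requested */
	return 0;
}

int main(void)
{
	uint64_t cfg;

	if (build_lbr_select(BR_USER | BR_KERNEL, &cfg) == 0)
		printf("LBR_SELECT config = %#llx\n", (unsigned long long)cfg);
	return 0;
}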
