perf/x86: Mark Intel PT and LBR/BTS as mutually exclusive
Intel PT cannot be used at the same time as LBR or BTS; using them
together causes a general protection fault. To avoid having to fix up
GPs in the fast path, disallow creating LBR/BTS events while PT events
are present, and vice versa.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kaixu Xia <kaixu.xia@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Robert Richter <rric@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: adrian.hunter@intel.com
Cc: kan.liang@intel.com
Cc: markus.t.metzger@intel.com
Cc: mathieu.poirier@linaro.org
Link: http://lkml.kernel.org/r/1421237903-181015-12-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Alexander Shishkin authored and Ingo Molnar committed Apr 2, 2015
1 parent ed69628 commit 4807034
Showing 3 changed files with 94 additions and 0 deletions.
arch/x86/kernel/cpu/perf_event.c (43 additions, 0 deletions)
@@ -263,6 +263,14 @@ static void hw_perf_event_destroy(struct perf_event *event)
        }
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
        hw_perf_event_destroy(event);

        /* undo the lbr/bts event accounting */
        x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
@@ -302,6 +310,35 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
        return x86_pmu_extra_regs(val, event);
}

/*
 * Check if we can create event of a certain type (that no conflicting events
 * are present).
 */
int x86_add_exclusive(unsigned int what)
{
        int ret = -EBUSY, i;

        if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
                return 0;

        mutex_lock(&pmc_reserve_mutex);
        for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
                if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
                        goto out;

        atomic_inc(&x86_pmu.lbr_exclusive[what]);
        ret = 0;

out:
        mutex_unlock(&pmc_reserve_mutex);
        return ret;
}

void x86_del_exclusive(unsigned int what)
{
        atomic_dec(&x86_pmu.lbr_exclusive[what]);
}

int x86_setup_perfctr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
@@ -346,6 +383,12 @@ int x86_setup_perfctr(struct perf_event *event)
                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;

                /* disallow bts if conflicting events are present */
                if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                        return -EBUSY;

                event->destroy = hw_perf_lbr_event_destroy;
        }

        hwc->config |= config;
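The hunks above add only the LBR/BTS side of the accounting; the PT side (the "vice versa" in the commit message) is not part of this patch. Below is a minimal sketch of how a PT PMU driver would be expected to pair the new helpers; pt_event_init() and pt_event_destroy() are hypothetical names, not code from this commit:

/*
 * Hypothetical illustration only, not part of this commit: a PT event
 * takes the PT slot on init and drops it on destroy, so creating an
 * LBR/BTS event while PT is active fails with -EBUSY, and vice versa.
 */
static void pt_event_destroy(struct perf_event *event)
{
        x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
        if (x86_add_exclusive(x86_lbr_exclusive_pt))
                return -EBUSY;

        event->destroy = pt_event_destroy;
        return 0;
}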
arch/x86/kernel/cpu/perf_event.h (40 additions, 0 deletions)
@@ -408,6 +408,12 @@ union x86_pmu_config {

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
@@ -505,6 +511,11 @@ struct x86_pmu {
        const int       *lbr_sel_map;              /* lbr_select mappings */
        bool            lbr_double_abort;          /* duplicated lbr aborts */

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * Extra registers for events
         */
@@ -603,6 +614,12 @@ static inline int x86_pmu_rdpmc_index(int index)
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);
@@ -689,6 +706,29 @@ static inline int amd_pmu_init(void)

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
        /* user explicitly requested branch sampling */
        if (has_branch_stack(event))
                return true;

        /* implicit branch sampling to correct PEBS skid */
        if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
            x86_pmu.intel_cap.pebs_format < 2)
                return true;

        return false;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
        if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !event->attr.freq && event->hw.sample_period == 1)
                return true;

        return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
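For reference, intel_pmu_has_bts() matches the same attribute combination that x86_setup_perfctr() treats as a BTS event in the first file. Below is a hedged userspace sketch (not from this commit) of an attribute set that would take that path; with this patch, opening such an event while a PT event is active should return EBUSY instead of triggering a fault:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative only: a branch-instructions counter with a fixed period of 1
 * and user-mode-only counting is what the BTS check above looks for. */
static int open_bts_style_event(pid_t pid)
{
        struct perf_event_attr attr = {
                .size           = sizeof(attr),
                .type           = PERF_TYPE_HARDWARE,
                .config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
                .sample_period  = 1,    /* !freq && sample_period == 1 */
                .exclude_kernel = 1,    /* BTS is user-mode only */
        };

        return syscall(__NR_perf_event_open, &attr, pid,
                       -1 /* any cpu */, -1 /* no group */, 0);
}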
arch/x86/kernel/cpu/perf_event_intel.c (11 additions, 0 deletions)
@@ -1942,6 +1942,17 @@ static int intel_pmu_hw_config(struct perf_event *event)
                ret = intel_pmu_setup_lbr_filter(event);
                if (ret)
                        return ret;

                /*
                 * BTS is set up earlier in this path, so don't account twice
                 */
                if (!intel_pmu_has_bts(event)) {
                        /* disallow lbr if conflicting events are present */
                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                                return -EBUSY;

                        event->destroy = hw_perf_lbr_event_destroy;
                }
        }

        if (event->attr.type != PERF_TYPE_RAW)
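The intel_pmu_hw_config() hunk covers the other way into the LBR machinery: branch-stack sampling. Below is a hedged sketch (not from this commit) of an attribute set that makes has_branch_stack() true and therefore now has to win the x86_lbr_exclusive_lbr slot against PT:

#include <linux/perf_event.h>

/* Illustrative only: requesting PERF_SAMPLE_BRANCH_STACK turns on LBR
 * sampling for an ordinary cycles event, which is the case accounted
 * for in intel_pmu_hw_config() above. */
static struct perf_event_attr lbr_sampling_attr = {
        .size                   = sizeof(struct perf_event_attr),
        .type                   = PERF_TYPE_HARDWARE,
        .config                 = PERF_COUNT_HW_CPU_CYCLES,
        .sample_period          = 100000,
        .sample_type            = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
        .branch_sample_type     = PERF_SAMPLE_BRANCH_ANY,
};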
