perf/x86: Add check_period PMU callback
Vince (and later on Ravi) reported crashes in the BTS code during
fuzzing with the following backtrace:

  general protection fault: 0000 [#1] SMP PTI
  ...
  RIP: 0010:perf_prepare_sample+0x8f/0x510
  ...
  Call Trace:
   <IRQ>
   ? intel_pmu_drain_bts_buffer+0x194/0x230
   intel_pmu_drain_bts_buffer+0x160/0x230
   ? tick_nohz_irq_exit+0x31/0x40
   ? smp_call_function_single_interrupt+0x48/0xe0
   ? call_function_single_interrupt+0xf/0x20
   ? call_function_single_interrupt+0xa/0x20
   ? x86_schedule_events+0x1a0/0x2f0
   ? x86_pmu_commit_txn+0xb4/0x100
   ? find_busiest_group+0x47/0x5d0
   ? perf_event_set_state.part.42+0x12/0x50
   ? perf_mux_hrtimer_restart+0x40/0xb0
   intel_pmu_disable_event+0xae/0x100
   ? intel_pmu_disable_event+0xae/0x100
   x86_pmu_stop+0x7a/0xb0
   x86_pmu_del+0x57/0x120
   event_sched_out.isra.101+0x83/0x180
   group_sched_out.part.103+0x57/0xe0
   ctx_sched_out+0x188/0x240
   ctx_resched+0xa8/0xd0
   __perf_event_enable+0x193/0x1e0
   event_function+0x8e/0xc0
   remote_function+0x41/0x50
   flush_smp_call_function_queue+0x68/0x100
   generic_smp_call_function_single_interrupt+0x13/0x30
   smp_call_function_single_interrupt+0x3e/0xe0
   call_function_single_interrupt+0xf/0x20
   </IRQ>

The reason is that while the event init code does several checks
for BTS events and prevents several unwanted config bits for
BTS events (like precise_ip), the PERF_EVENT_IOC_PERIOD ioctl
allows creating a BTS event without those checks being done.

The following sequence will cause the crash:

If we create an 'almost' BTS event with precise_ip and callchains,
and then change its period (via PERF_EVENT_IOC_PERIOD) so that it
turns into a BTS event, it will crash the perf_prepare_sample()
function, because precise_ip events are expected to come in with
callchain data initialized, but that's not the case for the
intel_pmu_drain_bts_buffer() caller.
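
For reference, a minimal userspace sketch of that sequence (not part
of the commit; the initial sample_period, the precise_ip level and the
busy loop are illustrative assumptions). On an unpatched kernel, the
PERF_EVENT_IOC_PERIOD call turns the event into a BTS event and the
next BTS drain can hit the crash above:

  #include <linux/perf_event.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>

  int main(void)
  {
          struct perf_event_attr attr;
          unsigned long long period = 1;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
          attr.sample_period = 100000;            /* not 1, so the BTS checks at init time pass */
          attr.sample_type = PERF_SAMPLE_CALLCHAIN;
          attr.precise_ip = 2;                    /* would be refused for a real BTS event */
          attr.disabled = 1;

          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          /* Period 1 turns this 'almost' BTS event into a real BTS event. */
          if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
                  perror("PERF_EVENT_IOC_PERIOD");

          ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

          for (volatile int i = 0; i < 100000000; i++)
                  ;                               /* generate branches so BTS records get drained */

          close(fd);
          return 0;
  }

With check_period in place, the PERF_EVENT_IOC_PERIOD ioctl above is
expected to fail with -EINVAL instead of quietly turning the event
into a BTS event.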

Add a check_period callback that is called before the period is
changed via PERF_EVENT_IOC_PERIOD. It denies the change if the event
would become a BTS event. The x86 implementation also applies the
existing limit_period check.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: <stable@vger.kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20190204123532.GA4794@krava
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Jiri Olsa authored and Ingo Molnar committed Feb 11, 2019
1 parent d139371 commit 81ec3f3
Showing 5 changed files with 58 additions and 2 deletions.
14 changes: 14 additions & 0 deletions arch/x86/events/core.c
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
                x86_pmu.check_microcode();
}

static int x86_pmu_check_period(struct perf_event *event, u64 value)
{
        if (x86_pmu.check_period && x86_pmu.check_period(event, value))
                return -EINVAL;

        if (value && x86_pmu.limit_period) {
                if (x86_pmu.limit_period(event, value) > value)
                        return -EINVAL;
        }

        return 0;
}

static struct pmu pmu = {
        .pmu_enable = x86_pmu_enable,
        .pmu_disable = x86_pmu_disable,

@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
        .event_idx = x86_pmu_event_idx,
        .sched_task = x86_pmu_sched_task,
        .task_ctx_size = sizeof(struct x86_perf_task_context),
        .check_period = x86_pmu_check_period,
};

void arch_perf_update_userpage(struct perf_event *event,
9 changes: 9 additions & 0 deletions arch/x86/events/intel/core.c
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
        intel_pmu_lbr_sched_task(ctx, sched_in);
}

static int intel_pmu_check_period(struct perf_event *event, u64 value)
{
        return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
}

PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
        .cpu_starting = intel_pmu_cpu_starting,
        .cpu_dying = intel_pmu_cpu_dying,
        .cpu_dead = intel_pmu_cpu_dead,

        .check_period = intel_pmu_check_period,
};

static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {

        .guest_get_msrs = intel_guest_get_msrs,
        .sched_task = intel_pmu_sched_task,

        .check_period = intel_pmu_check_period,
};

static __init void intel_clovertown_quirk(void)
16 changes: 14 additions & 2 deletions arch/x86/events/perf_event.h
@@ -646,6 +646,11 @@ struct x86_pmu {
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

        /*
         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
         */
        int (*check_period) (struct perf_event *event, u64 period);
};

struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;

@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

        return hw_event == bts_event && hwc->sample_period == 1;
        return hw_event == bts_event && period == 1;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        return intel_pmu_has_bts_period(event, hwc->sample_period);
}

int intel_pmu_save_and_restart(struct perf_event *event);
5 changes: 5 additions & 0 deletions include/linux/perf_event.h
@@ -447,6 +447,11 @@ struct pmu {
         * Filter events for PMU-specific reasons.
         */
        int (*filter_match) (struct perf_event *event); /* optional */

        /*
         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
         */
        int (*check_period) (struct perf_event *event, u64 value); /* optional */
};

enum perf_addr_filter_action_t {
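The new hook is optional: PMUs that leave it unset get the
perf_event_nop_int() default installed by perf_pmu_register() (see the
kernel/events/core.c hunks below). As a hedged illustration (not part
of this commit; the driver name, MYDRV_MAX_PERIOD, and the
32-bit-counter assumption are made up), another PMU driver could use
the callback to refuse periods its hardware cannot program:

  #include <linux/perf_event.h>

  /* Hypothetical 32-bit counter limit; not a real kernel symbol. */
  #define MYDRV_MAX_PERIOD        ((1ULL << 32) - 1)

  static int mydrv_pmu_check_period(struct perf_event *event, u64 value)
  {
          /* A non-zero return makes the PERF_EVENT_IOC_PERIOD ioctl fail. */
          return value > MYDRV_MAX_PERIOD ? -EINVAL : 0;
  }

  static struct pmu mydrv_pmu = {
          /* ... the driver's other callbacks ... */
          .check_period = mydrv_pmu_check_period,
  };

Drivers that never need to veto a period simply leave .check_period
unset and keep the nop default.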
16 changes: 16 additions & 0 deletions kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
        }
}

static int perf_event_check_period(struct perf_event *event, u64 value)
{
        return event->pmu->check_period(event, value);
}

static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
        u64 value;

@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
                return -EINVAL;

        if (perf_event_check_period(event, value))
                return -EINVAL;

        event_function_call(event, __perf_event_period, &value);

        return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
        return 0;
}

static int perf_event_nop_int(struct perf_event *event, u64 value)
{
        return 0;
}

static DEFINE_PER_CPU(unsigned int, nop_txn_flags);

static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
                pmu->pmu_disable = perf_pmu_nop_void;
        }

        if (!pmu->check_period)
                pmu->check_period = perf_event_nop_int;

        if (!pmu->event_idx)
                pmu->event_idx = perf_event_idx_default;

