From 7292a6896f9417658bb4821d2a8dc8835917b084 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 5 Jun 2012 17:56:48 -0700
Subject: [PATCH]

--- yaml ---
r: 312223
b: refs/heads/master
c: 70ab7003dec58afeae7f5d681dfa309b3a259f03
h: refs/heads/master
i:
  312221: 6a4337c7c7fef8b95d1f79b59dc0e97af26628d1
  312219: c2958dffcbb459ce6e2b004907dbebf3e2090951
  312215: ad099436e23880901993e2f6ddd36347de06a90c
  312207: f33f94239e237939fa02fa17db7b1a39073f4e45
  312191: 8a5691055baf932140694f2aa91a3de17d908ddc
v: v3
---
 [refs]                                          | 2 +-
 trunk/arch/x86/kernel/cpu/perf_event.h          | 3 ++-
 trunk/arch/x86/kernel/cpu/perf_event_intel.c    | 2 ++
 trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c | 8 ++++----
 4 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/[refs] b/[refs]
index 5ae9aa842323..27386113de55 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c48b60538c3ba05a7a2713c4791b25405525431b
+refs/heads/master: 70ab7003dec58afeae7f5d681dfa309b3a259f03
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h
index 23b5710b1747..3df3de9452a9 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.h
+++ b/trunk/arch/x86/kernel/cpu/perf_event.h
@@ -69,7 +69,7 @@ struct amd_nb {
 };
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		4
+#define MAX_PEBS_EVENTS		8
 
 /*
  * A debug store configuration.
@@ -378,6 +378,7 @@ struct x86_pmu {
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
 	void		(*pebs_aliases)(struct perf_event *event);
+	int		max_pebs_events;
 
 	/*
 	 * Intel LBR
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
index 187c294bc658..e23e71f25264 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1800,6 +1800,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.events_maskl = ebx.full;
 		x86_pmu.events_mask_len = eax.split.mask_length;
 
+		x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
 		/*
 		 * Quirk: v2 perfmon does not report fixed-purpose events, so
 		 * assume at least 3 events:
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 35e2192df9f4..026373edef7f 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > 1);
+	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
 	at += n - 1;
 
 	__intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+	WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
 
 	for ( ; at < top; at++) {
-		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+		for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
 			event = cpuc->events[bit];
 			if (!test_bit(bit, cpuc->active_mask))
 				continue;
@@ -670,7 +670,7 @@
 			break;
 		}
 
-		if (!event || bit >= MAX_PEBS_EVENTS)
+		if (!event || bit >= x86_pmu.max_pebs_events)
 			continue;
 
 		__intel_pmu_pebs_event(event, iregs, at);
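
Not part of the patch: a minimal standalone sketch of the clamping idea the intel_pmu_init() hunk introduces, i.e. taking the usable PEBS event count as the minimum of the static MAX_PEBS_EVENTS bound (now 8) and the number of general-purpose counters the CPU actually reports. It is plain userspace C, not kernel code, and every name except MAX_PEBS_EVENTS is made up for illustration.

/*
 * Illustration only -- mimics
 *   x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
 * from the patch above: never claim more PEBS-capable events than the
 * CPU has counters, and never exceed the static array bound.
 */
#include <stdio.h>

#define MAX_PEBS_EVENTS	8	/* static upper bound, raised from 4 by this patch */

static unsigned int clamped_pebs_events(unsigned int num_counters)
{
	/* equivalent of min_t(unsigned, MAX_PEBS_EVENTS, num_counters) */
	return num_counters < MAX_PEBS_EVENTS ? num_counters : MAX_PEBS_EVENTS;
}

int main(void)
{
	/* e.g. 8 counters (Sandy Bridge, HT off) vs. 4 counters (Nehalem) */
	printf("8 counters -> %u PEBS events\n", clamped_pebs_events(8));
	printf("4 counters -> %u PEBS events\n", clamped_pebs_events(4));
	return 0;
}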