perf/x86/intel: Support PEBS on fixed counters
The Extended PEBS feature supports PEBS on the fixed-function
performance counters as well as on all four general-purpose counters.
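
For illustration, a precise event that the scheduler places on a fixed
counter (e.g. instructions, which typically maps to fixed counter 0 on
Intel) can now use PEBS. A minimal userspace perf_event_open(2) sketch,
with error handling trimmed; whether the event actually lands on a
fixed counter is up to the kernel's counter scheduler:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_INSTRUCTIONS; /* may land on fixed counter 0 */
		attr.sample_period = 100000;
		attr.precise_ip = 2;	/* request PEBS (low-skid) sampling */

		/* monitor the calling thread on any CPU */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			perror("perf_event_open");
		else
			close(fd);
		return 0;
	}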

The order of PEBS and fixed-counter enabling has to change to make
sure PEBS is enabled before a fixed counter starts counting.

This reordering doesn't change the behavior of the current code on
platforms which don't support Extended PEBS, because there is no
dependency among those enable/disable functions.
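
A condensed userspace sketch of the resulting call order; the helper
names here are hypothetical stand-ins for intel_pmu_pebs_enable(),
intel_pmu_pebs_disable() and the fixed-counter paths changed in the
diff below:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel helpers. */
	static void pebs_enable(void)   { puts("enable PEBS"); }
	static void fixed_enable(void)  { puts("enable fixed counter"); }
	static void pebs_disable(void)  { puts("disable PEBS"); }
	static void fixed_disable(void) { puts("disable fixed counter"); }

	/* New order: PEBS is configured before the fixed counter
	 * starts, so the counter begins counting with PEBS armed.
	 */
	static void enable_event(bool precise_ip)
	{
		if (precise_ip)
			pebs_enable();
		fixed_enable();
	}

	/* The disable path mirrors it: PEBS is torn down before the
	 * fixed counter is stopped, matching the early return for
	 * fixed counters in intel_pmu_disable_event().
	 */
	static void disable_event(bool precise_ip)
	{
		if (precise_ip)
			pebs_disable();
		fixed_disable();
	}

	int main(void)
	{
		enable_event(true);
		disable_event(true);
		return 0;
	}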

Don't enable IRQ generation (0x8) in MSR_ARCH_PERFMON_FIXED_CTR_CTRL
for PEBS events; the PEBS ucode handles the interrupt generation.
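
For reference, each fixed counter owns a 4-bit control field in
MSR_ARCH_PERFMON_FIXED_CTR_CTRL at bit position (index * 4). A minimal
standalone sketch of the bit computation, mirroring the new
intel_pmu_enable_fixed() logic; fixed_ctrl_bits() is an illustrative
helper, not a kernel function:

	#include <stdint.h>
	#include <stdio.h>

	/* Per-counter field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL:
	 * 0x1 = count in ring 0, 0x2 = count in ring 3,
	 * 0x8 = generate a PMI on overflow.
	 */
	static uint64_t fixed_ctrl_bits(int idx, int usr, int os, int pebs)
	{
		uint64_t bits = 0;

		if (!pebs)	/* PEBS ucode raises the interrupt itself */
			bits |= 0x8;
		if (usr)
			bits |= 0x2;
		if (os)
			bits |= 0x1;

		return bits << (idx * 4);
	}

	int main(void)
	{
		/* Fixed counter 0 counting user+kernel as a PEBS event:
		 * prints 0x3; the PMI bit (0x8) stays clear.
		 */
		printf("%#llx\n", (unsigned long long)fixed_ctrl_bits(0, 1, 1, 1));
		return 0;
	}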

Based-on-code-from: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Link: http://lkml.kernel.org/r/20180309021542.11374-2-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Kan Liang authored and Ingo Molnar committed Jul 25, 2018
Commit 4f08b62 (parent 3196234)
 arch/x86/events/intel/core.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
 	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
+	if (unlikely(event->attr.precise_ip))
+		intel_pmu_pebs_disable(event);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_disable_fixed(hwc);
 		return;
 	}
 
 	x86_pmu_disable_event(event);
-
-	if (unlikely(event->attr.precise_ip))
-		intel_pmu_pebs_disable(event);
 }
 
 static void intel_pmu_del_event(struct perf_event *event)
@@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event)
 	x86_perf_event_update(event);
 }
 
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
-	u64 ctrl_val, bits, mask;
+	u64 ctrl_val, mask, bits = 0;
 
 	/*
-	 * Enable IRQ generation (0x8),
+	 * Enable IRQ generation (0x8), if not PEBS,
 	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
 	 * if requested:
 	 */
-	bits = 0x8ULL;
+	if (!event->attr.precise_ip)
+		bits |= 0x8;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
 		bits |= 0x2;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
@@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	if (unlikely(event_is_checkpointed(event)))
 		cpuc->intel_cp_status |= (1ull << hwc->idx);
 
+	if (unlikely(event->attr.precise_ip))
+		intel_pmu_pebs_enable(event);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_enable_fixed(hwc);
+		intel_pmu_enable_fixed(event);
 		return;
 	}
 
-	if (unlikely(event->attr.precise_ip))
-		intel_pmu_pebs_enable(event);
-
 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
