perf/x86/intel: Factor out intel_update_topdown_event()
Similar to Ice Lake, the Intel Sapphire Rapids server also supports the
topdown performance metrics feature. The difference is that Sapphire
Rapids extends the PERF_METRICS MSR to cover the TMA method's level-two
metrics, which introduces 8 metric events. The current
icl_update_topdown_event() only checks the 4 level-one metric events.

Factor out intel_update_topdown_event() to facilitate code sharing
between Ice Lake and Sapphire Rapids.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1611873611-156687-3-git-send-email-kan.liang@linux.intel.com
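
For background, the four level-one metric events (retiring, bad speculation,
frontend bound, backend bound) occupy consecutive pseudo-indices starting at
INTEL_PMC_IDX_METRIC_BASE, so a single upper bound is all the update loop
needs. Below is a minimal stand-alone sketch of that indexing, assuming the
Sapphire Rapids level-two events simply extend the range by four; the exact
follow-up layout is not part of this commit:

/*
 * Stand-alone model (not kernel code) of why a single "metric_end"
 * bound is enough: the top-down metric events live at consecutive
 * pseudo-indices above the fixed counters, so widening the loop bound
 * covers the extra level-two events. The level-one index values mirror
 * arch/x86/include/asm/perf_event.h; the "+ 4" level-two layout is an
 * assumption about the follow-up Sapphire Rapids patch.
 */
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED		32
#define INTEL_PMC_IDX_METRIC_BASE	(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING	(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 3)

/* Count the metric indices a loop bounded by metric_end + 1 visits. */
static int count_metric_events(int metric_end)
{
	int idx, n = 0;

	for (idx = 0; idx < metric_end + 1; idx++)
		if (idx >= INTEL_PMC_IDX_METRIC_BASE)	/* is_topdown_idx() stand-in */
			n++;
	return n;
}

int main(void)
{
	/* Ice Lake: 4 level-one events. */
	printf("icl: %d\n", count_metric_events(INTEL_PMC_IDX_TD_BE_BOUND));
	/* Sapphire Rapids (assumed layout): 8 events. */
	printf("spr: %d\n", count_metric_events(INTEL_PMC_IDX_TD_BE_BOUND + 4));
	return 0;
}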
Kan Liang authored and Peter Zijlstra committed Feb 1, 2021
1 parent 2a6c6b7 commit 628d923
Showing 1 changed file with 13 additions and 7 deletions.

arch/x86/events/intel/core.c
@@ -2325,8 +2325,8 @@ static void __icl_update_topdown_event(struct perf_event *event,
 	}
 }
 
-static void update_saved_topdown_regs(struct perf_event *event,
-				      u64 slots, u64 metrics)
+static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
+				      u64 metrics, int metric_end)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *other;
@@ -2335,7 +2335,7 @@ static void update_saved_topdown_regs(struct perf_event *event,
 	event->hw.saved_slots = slots;
 	event->hw.saved_metric = metrics;
 
-	for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) {
+	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
 		if (!is_topdown_idx(idx))
 			continue;
 		other = cpuc->events[idx];
@@ -2350,7 +2350,8 @@ static void update_saved_topdown_regs(struct perf_event *event,
  * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
  * modify by a NMI. PMU has to be disabled before calling this function.
  */
-static u64 icl_update_topdown_event(struct perf_event *event)
+
+static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *other;
@@ -2366,7 +2367,7 @@ static u64 icl_update_topdown_event(struct perf_event *event)
 	/* read PERF_METRICS */
 	rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
 
-	for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) {
+	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
 		if (!is_topdown_idx(idx))
 			continue;
 		other = cpuc->events[idx];
@@ -2392,7 +2393,7 @@ static u64 icl_update_topdown_event(struct perf_event *event)
 		 * Don't need to reset the PERF_METRICS and Fixed counter 3.
 		 * Because the values will be restored in next schedule in.
 		 */
-		update_saved_topdown_regs(event, slots, metrics);
+		update_saved_topdown_regs(event, slots, metrics, metric_end);
 		reset = false;
 	}
 
@@ -2401,12 +2402,17 @@ static u64 icl_update_topdown_event(struct perf_event *event)
 		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
 		wrmsrl(MSR_PERF_METRICS, 0);
 		if (event)
-			update_saved_topdown_regs(event, 0, 0);
+			update_saved_topdown_regs(event, 0, 0, metric_end);
 	}
 
 	return slots;
 }
 
+static u64 icl_update_topdown_event(struct perf_event *event)
+{
+	return intel_update_topdown_event(event, INTEL_PMC_IDX_TD_BE_BOUND);
+}
+
 static void intel_pmu_read_topdown_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
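With the helper factored out, icl_update_topdown_event() above reduces to a
one-line wrapper that passes INTEL_PMC_IDX_TD_BE_BOUND. A hedged sketch of
the Sapphire Rapids counterpart a later patch in the series could add,
assuming the four level-two metrics sit directly after the level-one ones
(spr_update_topdown_event and the + 4 bound are illustrative, not part of
this commit):

static u64 spr_update_topdown_event(struct perf_event *event)
{
	/* Assumed: level-two metrics extend the index range by four. */
	return intel_update_topdown_event(event,
					  INTEL_PMC_IDX_TD_BE_BOUND + 4);
}

Keeping the bound as a parameter rather than a per-model #ifdef means each
CPU model only has to supply a trivial wrapper, while the NMI-safety and
register-saving logic stays in one place.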
