perf/x86/intel: Factor out intel_pmu_check_extra_regs
Each hybrid PMU has to check and update its own extra registers before
registration.

The intel_pmu_check_extra_regs() helper will be reused later to check the
extra registers of each hybrid PMU.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-14-git-send-email-kan.liang@linux.intel.com
Kan Liang authored and Peter Zijlstra committed Apr 19, 2021
1 parent bc14fe1 commit 34d5b61
Showing 1 changed file with 21 additions and 14 deletions.
arch/x86/events/intel/core.c: 21 additions & 14 deletions
@@ -5127,6 +5127,26 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con
 	}
 }
 
+static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
+{
+	struct extra_reg *er;
+
+	/*
+	 * Access extra MSR may cause #GP under certain circumstances.
+	 * E.g. KVM doesn't support offcore event
+	 * Check all extra_regs here.
+	 */
+	if (!extra_regs)
+		return;
+
+	for (er = extra_regs; er->msr; er++) {
+		er->extra_msr_access = check_msr(er->msr, 0x11UL);
+		/* Disable LBR select mapping */
+		if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+			x86_pmu.lbr_sel_map = NULL;
+	}
+}
+
 __init int intel_pmu_init(void)
 {
 	struct attribute **extra_skl_attr = &empty_attrs;
@@ -5138,7 +5158,6 @@ __init int intel_pmu_init(void)
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
 	unsigned int fixed_mask;
-	struct extra_reg *er;
 	bool pmem = false;
 	int version, i;
 	char *name;
@@ -5795,19 +5814,7 @@ __init int intel_pmu_init(void)
 	if (x86_pmu.lbr_nr)
 		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
 
-	/*
-	 * Access extra MSR may cause #GP under certain circumstances.
-	 * E.g. KVM doesn't support offcore event
-	 * Check all extra_regs here.
-	 */
-	if (x86_pmu.extra_regs) {
-		for (er = x86_pmu.extra_regs; er->msr; er++) {
-			er->extra_msr_access = check_msr(er->msr, 0x11UL);
-			/* Disable LBR select mapping */
-			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
-				x86_pmu.lbr_sel_map = NULL;
-		}
-	}
+	intel_pmu_check_extra_regs(x86_pmu.extra_regs);
 
 	/* Support full width counters using alternative MSR range */
 	if (x86_pmu.intel_cap.full_width_write) {
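
The commit message says the helper will be reused to check each hybrid PMU's
extra registers. As a rough illustration of that reuse, here is a minimal
sketch in the style of arch/x86/events/intel/core.c. The struct x86_hybrid_pmu
type, the hybrid_pmu[] and num_hybrid_pmus fields, and the
intel_pmu_check_hybrid_pmus() name are assumptions made for this sketch, not
part of this commit; only intel_pmu_check_extra_regs() comes from the diff
above.

	/* Sketch only: the per-PMU fields below are assumed, not defined by this patch. */
	static __init void intel_pmu_check_hybrid_pmus(void)
	{
		struct x86_hybrid_pmu *pmu;
		int i;

		for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
			pmu = &x86_pmu.hybrid_pmu[i];

			/*
			 * Each hybrid PMU validates its own extra-register
			 * table with the helper introduced by this commit.
			 */
			intel_pmu_check_extra_regs(pmu->extra_regs);
		}
	}

The point of the refactor is exactly this call shape: the check no longer
assumes the single global x86_pmu.extra_regs, so each hybrid PMU can pass its
own table before registration.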
