diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 848dbe9cbd0eb..8ea1c988e19ba 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -624,7 +624,7 @@ int x86_pmu_hw_config(struct perf_event *event)
 		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
 	if (event->attr.type == event->pmu->type)
-		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+		event->hw.config |= x86_pmu_get_event_config(event);
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
 		s64 left = event->attr.sample_period;
@@ -2098,6 +2098,9 @@ static int __init init_hw_perf_events(void)
 	if (!x86_pmu.intel_ctrl)
 		x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
 
+	if (!x86_pmu.config_mask)
+		x86_pmu.config_mask = X86_RAW_EVENT_MASK;
+
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6a6f1f4b92055..6e42ba0e6b727 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6144,6 +6144,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
 		pmu->cntr_mask64 = x86_pmu.cntr_mask64;
 		pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
+		pmu->config_mask = X86_RAW_EVENT_MASK;
 		pmu->unconstrained = (struct event_constraint)
 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
 							0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 493bc9f70ffe1..55468ea89d23d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -695,6 +695,7 @@ struct x86_hybrid_pmu {
 	union perf_capabilities	intel_cap;
 	u64			intel_ctrl;
 	u64			pebs_events_mask;
+	u64			config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -790,6 +791,7 @@ struct x86_pmu {
 	int		(*rdpmc_index)(int index);
 	u64		(*event_map)(int);
 	int		max_events;
+	u64		config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -1241,6 +1243,11 @@ static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
 	return fls64(hybrid(pmu, fixed_cntr_mask64));
 }
 
+static inline u64 x86_pmu_get_event_config(struct perf_event *event)
+{
+	return event->attr.config & hybrid(event->pmu, config_mask);
+}
+
 extern struct event_constraint emptyconstraint;
 
 extern struct event_constraint unconstrained;
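
For illustration, a minimal self-contained C sketch of the pattern this patch introduces: a per-PMU config_mask that filters the user-supplied raw event config, with an init-time fallback to a global default when a PMU leaves the mask unset. The struct layouts, the mask value, and the names below (pmu_stub, event_stub, RAW_EVENT_MASK_DEFAULT) are simplified stand-ins invented for this sketch, not the kernel's definitions; in particular, the hybrid() per-PMU dispatch is reduced to a plain pointer dereference.

/*
 * Simplified stand-ins for the kernel structures. pmu_stub.config_mask
 * plays the role of x86_pmu.config_mask / x86_hybrid_pmu.config_mask.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical default bitmask, not X86_RAW_EVENT_MASK's real value. */
#define RAW_EVENT_MASK_DEFAULT 0x00ffffffULL

struct pmu_stub {
	uint64_t config_mask;	/* EVENTSEL bits this PMU accepts; 0 = not set yet */
};

struct event_stub {
	struct pmu_stub *pmu;
	uint64_t config;	/* user-supplied attr.config */
};

/* Mirrors the init_hw_perf_events() hunk: default the mask if unset. */
static void pmu_init(struct pmu_stub *pmu)
{
	if (!pmu->config_mask)
		pmu->config_mask = RAW_EVENT_MASK_DEFAULT;
}

/* Mirrors x86_pmu_get_event_config(): drop bits the PMU doesn't accept. */
static uint64_t get_event_config(const struct event_stub *event)
{
	return event->config & event->pmu->config_mask;
}

int main(void)
{
	struct pmu_stub pmu = { .config_mask = 0 };
	struct event_stub ev;

	pmu_init(&pmu);
	ev.pmu = &pmu;
	ev.config = 0xdeadbeefcafef00dULL;	/* arbitrary raw config with out-of-mask bits set */

	/* prints 0xfef00d: only bits inside config_mask survive */
	printf("effective config: %#llx\n",
	       (unsigned long long)get_event_config(&ev));
	return 0;
}

The point of routing x86_pmu_hw_config() through a per-PMU mask rather than the fixed X86_RAW_EVENT_MASK is that each PMU (notably each hybrid PMU type) can then advertise its own set of valid EVENTSEL bits, while this patch keeps behavior unchanged by defaulting every mask to X86_RAW_EVENT_MASK.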