perf/x86: Add config_mask to represent EVENTSEL bitmask
Different vendors may support different fields in the EVENTSEL MSR. For
example, Intel adds the new umask2 and eq fields to the EVENTSEL MSR
starting with Perfmon version 6. However, a fixed mask, X86_RAW_EVENT_MASK,
is currently used to filter attr.config.

Introduce a new config_mask to record the EVENTSEL bitmask that is actually
supported, and apply it only to the existing code for now. No functional
change.

Co-developed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-7-kan.liang@linux.intel.com
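
For illustration only, a minimal userspace sketch (not kernel code) of the
filtering idea described above: each PMU carries its own config_mask, and
attr.config is filtered against that per-PMU mask instead of the fixed
X86_RAW_EVENT_MASK. The bit values mirror the classic EVENTSEL fields; the
"umask2" field at bits 39:32, the PMU names, and the sample config value are
illustrative assumptions, not taken from this commit.

/* sketch.c - toy model of per-PMU EVENTSEL filtering (assumptions noted). */
#include <stdint.h>
#include <stdio.h>

/* Classic EVENTSEL fields covered by X86_RAW_EVENT_MASK. */
#define EVENTSEL_EVENT	0x00000000000000ffULL	/* event select */
#define EVENTSEL_UMASK	0x000000000000ff00ULL	/* unit mask */
#define EVENTSEL_EDGE	(1ULL << 18)		/* edge detect */
#define EVENTSEL_INV	(1ULL << 23)		/* invert counter mask */
#define EVENTSEL_CMASK	0x00000000ff000000ULL	/* counter mask */

#define RAW_EVENT_MASK	(EVENTSEL_EVENT | EVENTSEL_UMASK | EVENTSEL_EDGE | \
			 EVENTSEL_INV | EVENTSEL_CMASK)

/* Hypothetical extra field a newer PMU might expose (assumption). */
#define EVENTSEL_UMASK2	0x000000ff00000000ULL

struct pmu_sketch {
	const char	*name;
	uint64_t	config_mask;	/* per-PMU supported EVENTSEL bits */
};

/* Rough counterpart of x86_pmu_get_event_config(): filter per PMU. */
static uint64_t get_event_config(const struct pmu_sketch *pmu, uint64_t config)
{
	return config & pmu->config_mask;
}

int main(void)
{
	/* Legacy PMU: only the classic bits; newer PMU: umask2 as well. */
	struct pmu_sketch legacy = { "legacy", RAW_EVENT_MASK };
	struct pmu_sketch newer  = { "newer",  RAW_EVENT_MASK | EVENTSEL_UMASK2 };

	/* attr.config with event 0xd0, umask 0x81 and a umask2 value set. */
	uint64_t config = 0x81d0ULL | (0x01ULL << 32);

	/* The fixed mask silently drops the umask2 bits ... */
	printf("%s: %#llx\n", legacy.name,
	       (unsigned long long)get_event_config(&legacy, config));
	/* ... while the wider per-PMU mask keeps them. */
	printf("%s: %#llx\n", newer.name,
	       (unsigned long long)get_event_config(&newer, config));
	return 0;
}

With the fixed mask the extra bits are silently dropped; with a wider per-PMU
config_mask they survive, which is the flexibility this commit prepares for.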
Kan Liang authored and Peter Zijlstra committed Jul 4, 2024
1 parent 608f697 commit e8fb5d6
Showing 3 changed files with 12 additions and 1 deletion.
5 changes: 4 additions & 1 deletion arch/x86/events/core.c
@@ -624,7 +624,7 @@ int x86_pmu_hw_config(struct perf_event *event)
 		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
 	if (event->attr.type == event->pmu->type)
-		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+		event->hw.config |= x86_pmu_get_event_config(event);
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
 		s64 left = event->attr.sample_period;
@@ -2098,6 +2098,9 @@ static int __init init_hw_perf_events(void)
 	if (!x86_pmu.intel_ctrl)
 		x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
 
+	if (!x86_pmu.config_mask)
+		x86_pmu.config_mask = X86_RAW_EVENT_MASK;
+
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");

1 change: 1 addition & 0 deletions arch/x86/events/intel/core.c
@@ -6144,6 +6144,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
 		pmu->cntr_mask64 = x86_pmu.cntr_mask64;
 		pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
+		pmu->config_mask = X86_RAW_EVENT_MASK;
 		pmu->unconstrained = (struct event_constraint)
 			__EVENT_CONSTRAINT(0, pmu->cntr_mask64,
 					   0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
7 changes: 7 additions & 0 deletions arch/x86/events/perf_event.h
@@ -695,6 +695,7 @@ struct x86_hybrid_pmu {
 	union perf_capabilities		intel_cap;
 	u64				intel_ctrl;
 	u64				pebs_events_mask;
+	u64				config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -790,6 +791,7 @@ struct x86_pmu {
 	int		(*rdpmc_index)(int index);
 	u64		(*event_map)(int);
 	int		max_events;
+	u64		config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -1241,6 +1243,11 @@ static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
 	return fls64(hybrid(pmu, fixed_cntr_mask64));
 }
 
+static inline u64 x86_pmu_get_event_config(struct perf_event *event)
+{
+	return event->attr.config & hybrid(event->pmu, config_mask);
+}
+
 extern struct event_constraint emptyconstraint;
 
 extern struct event_constraint unconstrained;
