Skip to content

Commit

Permalink
perf/x86: Hybrid PMU support for unconstrained
Browse files Browse the repository at this point in the history
The unconstrained event constraint depends on the number of GP and fixed counters,
which can differ between hybrid PMUs. Each hybrid PMU should therefore use its own
copy of unconstrained rather than the global one.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1618237865-33448-8-git-send-email-kan.liang@linux.intel.com
  • Loading branch information
Kan Liang authored and Peter Zijlstra committed Apr 19, 2021
1 parent d4b294b commit eaacf07
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 1 deletion.
2 changes: 1 addition & 1 deletion arch/x86/events/intel/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -3147,7 +3147,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
}

return &unconstrained;
return &hybrid_var(cpuc->pmu, unconstrained);
}

static struct event_constraint *
Expand Down
11 changes: 11 additions & 0 deletions arch/x86/events/perf_event.h
Original file line number Diff line number Diff line change
Expand Up @@ -638,6 +638,7 @@ struct x86_hybrid_pmu {
int max_pebs_events;
int num_counters;
int num_counters_fixed;
struct event_constraint unconstrained;
};

static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
Expand All @@ -658,6 +659,16 @@ extern struct static_key_false perf_is_hybrid;
__Fp; \
}))

/*
 * hybrid_var() - resolve a per-PMU copy of a global variable.
 *
 * Yields an lvalue: on a hybrid system with a non-NULL _pmu it refers to
 * the x86_hybrid_pmu's own _var field; otherwise it falls back to the
 * global _var. Implemented as a GNU statement expression so it can be
 * used on both sides of an assignment.
 *
 * NOTE(review): assumes _var names both a global and an identically-typed
 * field of struct x86_hybrid_pmu — confirm at each call site.
 */
#define hybrid_var(_pmu, _var)					\
(*({								\
	typeof(&_var) __Fp = &_var;				\
								\
	if (is_hybrid() && (_pmu))				\
		__Fp = &hybrid_pmu(_pmu)->_var;			\
								\
	__Fp;							\
}))

/*
* struct x86_pmu - generic x86 pmu
*/
Expand Down

0 comments on commit eaacf07

Please sign in to comment.