Skip to content

Commit

Permalink
perf/x86/intel: Factor out intel_pmu_check_num_counters
Browse files Browse the repository at this point in the history
Each Hybrid PMU has to check its own number of counters and mask fixed
counters before registration.

The intel_pmu_check_num_counters() function will be reused later to
check the number of counters for each hybrid PMU.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-12-git-send-email-kan.liang@linux.intel.com
  • Loading branch information
Kan Liang authored and Peter Zijlstra committed Apr 19, 2021
1 parent 183af73 commit b8c4d1a
Showing 1 changed file with 24 additions and 14 deletions.
38 changes: 24 additions & 14 deletions arch/x86/events/intel/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -5064,6 +5064,26 @@ static const struct attribute_group *attr_update[] = {

static struct attribute *empty_attrs;

/*
 * Sanity-check the advertised number of generic and fixed counters,
 * clipping each to its architectural maximum, and derive the global
 * enable mask (*intel_ctrl) from the resulting counts.
 *
 * @num_counters:       in/out count of general-purpose counters;
 *                      clipped to INTEL_PMC_MAX_GENERIC if too large.
 * @num_counters_fixed: in/out count of fixed counters; clipped to
 *                      INTEL_PMC_MAX_FIXED if too large.
 * @intel_ctrl:         out mask: one bit per usable GP counter, plus
 *                      @fixed_mask shifted into the fixed-counter range.
 * @fixed_mask:         bitmask of supported fixed counters.
 */
static void intel_pmu_check_num_counters(int *num_counters,
					 int *num_counters_fixed,
					 u64 *intel_ctrl, u64 fixed_mask)
{
	int gp = *num_counters;
	int fixed = *num_counters_fixed;

	if (gp > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     gp, INTEL_PMC_MAX_GENERIC);
		gp = INTEL_PMC_MAX_GENERIC;
		*num_counters = gp;
	}

	if (fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     fixed, INTEL_PMC_MAX_FIXED);
		fixed = INTEL_PMC_MAX_FIXED;
		*num_counters_fixed = fixed;
	}

	/* GP counters occupy the low bits; fixed counters start at
	 * INTEL_PMC_IDX_FIXED. gp <= INTEL_PMC_MAX_GENERIC, so the
	 * shift below cannot reach the width of u64. */
	*intel_ctrl = ((1ULL << gp) - 1) | (fixed_mask << INTEL_PMC_IDX_FIXED);
}

__init int intel_pmu_init(void)
{
struct attribute **extra_skl_attr = &empty_attrs;
Expand Down Expand Up @@ -5703,20 +5723,10 @@ __init int intel_pmu_init(void)

x86_pmu.attr_update = attr_update;

if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
}
x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;

if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
}

x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
intel_pmu_check_num_counters(&x86_pmu.num_counters,
&x86_pmu.num_counters_fixed,
&x86_pmu.intel_ctrl,
(u64)fixed_mask);

/* AnyThread may be deprecated on arch perfmon v5 or later */
if (x86_pmu.intel_cap.anythread_deprecated)
Expand Down

0 comments on commit b8c4d1a

Please sign in to comment.