perf/x86: Only show format attributes when supported
Only show the Intel format attributes in sysfs when the feature is actually
supported by the current model number. This allows programs to probe which
format attributes are available, and to give users a sensible error message
when they are not.

This handles nearly all cases for Intel attributes since Nehalem, except the
(obscure) case where the model number is known but PEBS is disabled in
PERF_CAPABILITIES.
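
For example (illustration only, not part of this patch), a tool can test for
the corresponding sysfs file before trying to use a format attribute. A
minimal sketch in C, assuming the standard
/sys/bus/event_source/devices/cpu/format/ layout exported by the core "cpu"
PMU:

  #include <stdio.h>
  #include <unistd.h>

  /* Return 1 if the core PMU exports the given format attribute. */
  static int pmu_format_supported(const char *attr)
  {
          char path[256];

          snprintf(path, sizeof(path),
                   "/sys/bus/event_source/devices/cpu/format/%s", attr);
          return access(path, R_OK) == 0;
  }

  int main(void)
  {
          if (!pmu_format_supported("ldlat"))
                  fprintf(stderr, "ldlat is not supported on this CPU model\n");
          return 0;
  }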

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170822185201.9261-2-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Andi Kleen authored and Ingo Molnar committed Aug 25, 2017
1 parent d061841 commit a5df70c
Showing 1 changed file with 41 additions and 7 deletions.
arch/x86/events/intel/core.c
@@ -3415,12 +3415,26 @@ static struct attribute *intel_arch3_formats_attr[] = {
 	&format_attr_any.attr,
 	&format_attr_inv.attr,
 	&format_attr_cmask.attr,
+	NULL,
+};
+
+static struct attribute *hsw_format_attr[] = {
 	&format_attr_in_tx.attr,
 	&format_attr_in_tx_cp.attr,
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
 
-	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
-	&format_attr_ldlat.attr, /* PEBS load latency */
-	NULL,
+static struct attribute *nhm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
+
+static struct attribute *slm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	NULL
 };
 
 static struct attribute *skl_format_attr[] = {
@@ -3795,6 +3809,7 @@ __init int intel_pmu_init(void)
 	unsigned int unused;
 	struct extra_reg *er;
 	int version, i;
+	struct attribute **extra_attr = NULL;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -3906,6 +3921,7 @@ __init int intel_pmu_init(void)
 		intel_pmu_pebs_data_source_nhm();
 		x86_add_quirk(intel_nehalem_quirk);
 		x86_pmu.pebs_no_tlb = 1;
+		extra_attr = nhm_format_attr;
 
 		pr_cont("Nehalem events, ");
 		break;
@@ -3941,6 +3957,7 @@
 		x86_pmu.extra_regs = intel_slm_extra_regs;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = slm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Silvermont events, ");
 		break;
 
@@ -3966,6 +3983,7 @@
 		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = glm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont events, ");
 		break;
 
@@ -3992,6 +4010,7 @@
 		x86_pmu.cpu_events = glm_events_attrs;
 		/* Goldmont Plus has 4-wide pipeline */
 		event_attr_td_total_slots_scale_glm.event_str = "4";
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont plus events, ");
 		break;
 
@@ -4021,6 +4040,7 @@
 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		intel_pmu_pebs_data_source_nhm();
+		extra_attr = nhm_format_attr;
 		pr_cont("Westmere events, ");
 		break;
 
@@ -4057,6 +4077,8 @@
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
 			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("SandyBridge events, ");
 		break;
 
@@ -4091,6 +4113,8 @@
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("IvyBridge events, ");
 		break;
 
@@ -4119,6 +4143,8 @@
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.lbr_double_abort = true;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Haswell events, ");
 		break;
 
@@ -4155,6 +4181,8 @@
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.limit_period = bdw_limit_period;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Broadwell events, ");
 		break;
 
@@ -4173,7 +4201,7 @@
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-
+		extra_attr = slm_format_attr;
 		pr_cont("Knights Landing/Mill events, ");
 		break;
 
@@ -4204,9 +4232,9 @@
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
-						  skl_format_attr);
-		WARN_ON(!x86_pmu.format_attrs);
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
+		extra_attr = merge_attr(extra_attr, skl_format_attr);
 		x86_pmu.cpu_events = hsw_events_attrs;
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
@@ -4229,6 +4257,12 @@
 		}
 	}
 
+	if (version >= 2 && extra_attr) {
+		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+						  extra_attr);
+		WARN_ON(!x86_pmu.format_attrs);
+	}
+
 	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
 		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
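
For reference (a simplified sketch, not the kernel's exact implementation):
the merge_attr() helper used above lives elsewhere in the x86 events code and
concatenates two NULL-terminated attribute arrays into one newly allocated,
NULL-terminated array, roughly like this:

  #include <linux/slab.h>
  #include <linux/sysfs.h>

  static struct attribute **merge_attr_sketch(struct attribute **a,
                                              struct attribute **b)
  {
          struct attribute **new;
          int i, j = 0, k = 0;

          while (a[j])                    /* count the common attributes */
                  j++;
          while (b[k])                    /* count the model-specific ones */
                  k++;

          new = kmalloc_array(j + k + 1, sizeof(*new), GFP_KERNEL);
          if (!new)
                  return NULL;

          for (i = 0; i < j; i++)
                  new[i] = a[i];
          for (i = 0; i < k; i++)
                  new[j + i] = b[i];
          new[j + k] = NULL;              /* keep the result NULL-terminated */

          return new;
  }

This way intel_arch3_formats_attr stays model-independent and only the
per-model extra_attr lists decide whether attributes such as ldlat or
offcore_rsp show up in sysfs.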
