KVM: x86/pmu: Drop amd_event_mapping[] in the KVM context
All gp or fixed counters have been reprogrammed using PERF_TYPE_RAW,
which means that the table that maps perf_hw_id to event select values
is no longer useful, at least for AMD.

For Intel, the logic that checks whether a pmu event reported by Intel
cpuid is unavailable is still required, in which case pmc_perf_hw_id()
is renamed to hw_event_available() and returns a bool, replacing the
"PERF_COUNT_HW_MAX + 1" semantics.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20220518132512.37864-12-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Like Xu authored and Paolo Bonzini committed Jun 8, 2022
1 parent 08dca7a commit 7aadaa9
Showing 5 changed files with 12 additions and 65 deletions.
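
Before the per-file diffs, here is a minimal editor's sketch (not part of the commit) of the control flow the patch creates: common code now asks the vendor implementation a yes/no question instead of translating the event selector into a perf_hw_id and comparing it against PERF_COUNT_HW_MAX. Only the names hw_event_available, amd_hw_event_available and check_pmu_event_filter come from the diff below; the stub types and the standalone harness are illustrative.

/*
 * Editor's sketch: condensed model of the new hook.  struct kvm_pmc is
 * stubbed; in KVM the callback is reached via static_call().
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm_pmc;				/* opaque stand-in for the KVM type */

struct kvm_pmu_ops {
	/* Replaces unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *). */
	bool (*hw_event_available)(struct kvm_pmc *pmc);
};

/* AMD no longer has an event table to consult: every event is available. */
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
	(void)pmc;
	return true;
}

/* The common filter path now short-circuits on unavailable events. */
static bool check_pmu_event_filter(const struct kvm_pmu_ops *ops,
				   struct kvm_pmc *pmc)
{
	if (!ops->hw_event_available(pmc))
		return false;
	/* ... pmu_event_filter lookup elided ... */
	return true;
}

int main(void)
{
	const struct kvm_pmu_ops amd = {
		.hw_event_available = amd_hw_event_available,
	};

	printf("event allowed: %d\n", check_pmu_event_filter(&amd, NULL));
	return 0;
}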
2 changes: 1 addition & 1 deletion arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -12,7 +12,7 @@ BUILD_BUG_ON(1)
  * a NULL definition, for example if "static_call_cond()" will be used
  * at the call sites.
  */
-KVM_X86_PMU_OP(pmc_perf_hw_id)
+KVM_X86_PMU_OP(hw_event_available)
 KVM_X86_PMU_OP(pmc_is_enabled)
 KVM_X86_PMU_OP(pmc_idx_to_pmc)
 KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
6 changes: 3 additions & 3 deletions arch/x86/kvm/pmu.c
@@ -158,9 +158,6 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 	};
 	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
 
-	if (type == PERF_TYPE_HARDWARE && config >= PERF_COUNT_HW_MAX)
-		return;
-
 	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
 	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
@@ -258,6 +255,9 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 	__u64 key;
 	int idx;
 
+	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
+		return false;
+
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 	if (!filter)
 		goto out;
2 changes: 1 addition & 1 deletion arch/x86/kvm/pmu.h
@@ -22,7 +22,7 @@ struct kvm_event_hw_type_mapping {
 };
 
 struct kvm_pmu_ops {
-	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
+	bool (*hw_event_available)(struct kvm_pmc *pmc);
 	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
 	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
56 changes: 3 additions & 53 deletions arch/x86/kvm/svm/pmu.c
@@ -33,34 +33,6 @@ enum index {
 	INDEX_ERROR,
 };
 
-/* duplicated from amd_perfmon_event_map, K7 and above should work. */
-static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
-	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
-	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
-	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
-	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
-	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
-	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
-	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
-	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
-};
-
-/* duplicated from amd_f17h_perfmon_event_map. */
-static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
-	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
-	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
-	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
-	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
-	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
-	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
-	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
-	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
-};
-
-/* amd_pmc_perf_hw_id depends on these being the same size */
-static_assert(ARRAY_SIZE(amd_event_mapping) ==
-	      ARRAY_SIZE(amd_f17h_event_mapping));
-
 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
 {
 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -154,31 +126,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 	return &pmu->gp_counters[msr_to_index(msr)];
 }
 
-static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
+static bool amd_hw_event_available(struct kvm_pmc *pmc)
 {
-	struct kvm_event_hw_type_mapping *event_mapping;
-	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-	int i;
-
-	/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
-	if (WARN_ON(pmc_is_fixed(pmc)))
-		return PERF_COUNT_HW_MAX;
-
-	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
-		event_mapping = amd_f17h_event_mapping;
-	else
-		event_mapping = amd_event_mapping;
-
-	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
-		if (event_mapping[i].eventsel == event_select
-		    && event_mapping[i].unit_mask == unit_mask)
-			break;
-
-	if (i == ARRAY_SIZE(amd_event_mapping))
-		return PERF_COUNT_HW_MAX;
-
-	return event_mapping[i].event_type;
+	return true;
 }
 
 /* check if a PMC is enabled by comparing it against global_ctrl bits. Because
@@ -345,7 +295,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 }
 
 struct kvm_pmu_ops amd_pmu_ops __initdata = {
-	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
+	.hw_event_available = amd_hw_event_available,
 	.pmc_is_enabled = amd_pmc_is_enabled,
 	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
 	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
11 changes: 4 additions & 7 deletions arch/x86/kvm/vmx/pmu_intel.c
@@ -84,7 +84,7 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 	}
 }
 
-static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
+static bool intel_hw_event_available(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
@@ -98,15 +98,12 @@ static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
 
 		/* disable event that reported as not present by cpuid */
 		if ((i < 7) && !(pmu->available_event_types & (1 << i)))
-			return PERF_COUNT_HW_MAX + 1;
+			return false;
 
 		break;
 	}
 
-	if (i == ARRAY_SIZE(intel_arch_events))
-		return PERF_COUNT_HW_MAX;
-
-	return intel_arch_events[i].event_type;
+	return true;
 }
 
 /* check if a PMC is enabled by comparing it with globl_ctrl bits. */
@@ -814,7 +811,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
 }
 
 struct kvm_pmu_ops intel_pmu_ops __initdata = {
-	.pmc_perf_hw_id = intel_pmc_perf_hw_id,
+	.hw_event_available = intel_hw_event_available,
 	.pmc_is_enabled = intel_pmc_is_enabled,
 	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
 	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
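
For the Intel side, the availability test that survives the patch can be modeled standalone as below. This is an editor's sketch, not kernel code: the two-entry table stands in for the kernel's intel_arch_events[] (the values shown are the well-known architectural event encodings, but treat them as illustrative), and available_event_types models the CPUID.0xA availability mask.

/*
 * Editor's sketch of intel_hw_event_available(): an event matching an
 * architectural entry is rejected when CPUID reports it absent; events
 * outside the table remain usable as raw events.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct arch_event { uint8_t eventsel, unit_mask; };

static const struct arch_event intel_arch_events[] = {
	{ 0x3c, 0x00 },	/* unhalted core cycles */
	{ 0xc0, 0x00 },	/* instructions retired */
};

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static bool hw_event_available(uint8_t event_select, uint8_t unit_mask,
			       uint32_t available_event_types)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
		if (intel_arch_events[i].eventsel != event_select ||
		    intel_arch_events[i].unit_mask != unit_mask)
			continue;

		/* a clear bit in the CPUID mask means "not available" */
		if ((i < 7) && !(available_event_types & (1u << i)))
			return false;

		break;
	}

	/* unknown selectors fall through: still programmable as raw */
	return true;
}

int main(void)
{
	/* mask with bit 1 clear: "instructions retired" reported absent */
	printf("%d\n", hw_event_available(0xc0, 0x00, ~0u & ~(1u << 1)));
	return 0;
}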