KVM: x86/svm/pmu: Direct access pmu->gp_counter[] to implement amd_*_to_pmc()

Access PMU counters on AMD by directly indexing the array of general
purpose counters instead of translating the PMC index to an MSR index.
AMD only supports gp counters, so there's no need to translate a PMC index
to an MSR index and back to a PMC index.

Opportunistically apply array_index_nospec() to reduce the attack
surface for speculative execution and remove the dead code.
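
For reference, a minimal, self-contained sketch of the pattern this patch
applies in amd_pmc_idx_to_pmc(): an architectural bounds check followed by
array_index_nospec() (the real helper from <linux/nospec.h>) so that a
mispredicted branch cannot speculatively index past the end of the array.
The demo_* names below are hypothetical stand-ins for the KVM structures:

#include <linux/nospec.h>
#include <linux/types.h>

/* Hypothetical stand-ins for kvm_pmu/kvm_pmc, for illustration only. */
struct demo_pmc {
        u64 counter;
};

struct demo_pmu {
        unsigned int nr_counters;
        struct demo_pmc counters[6];
};

static struct demo_pmc *demo_idx_to_pmc(struct demo_pmu *pmu, unsigned int idx)
{
        /* Architectural bounds check: out-of-range indices return NULL. */
        if (idx >= pmu->nr_counters)
                return NULL;

        /*
         * array_index_nospec() clamps idx to a safe value (0) if the bounds
         * check above is mispredicted, so the dereference below cannot
         * speculatively read past the end of counters[].
         */
        return &pmu->counters[array_index_nospec(idx, pmu->nr_counters)];
}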

Signed-off-by: Like Xu <likexu@tencent.com>
Link: https://lore.kernel.org/r/20220831085328.45489-7-likexu@tencent.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Like Xu authored and Sean Christopherson committed Sep 28, 2022
1 parent cf52de6 commit 5c6a67f
 arch/x86/kvm/svm/pmu.c | 41 +++++------------------------------------
 1 file changed, 5 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -33,23 +33,6 @@ enum index {
         INDEX_ERROR,
 };
 
-static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
-{
-        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-                if (type == PMU_TYPE_COUNTER)
-                        return MSR_F15H_PERF_CTR;
-                else
-                        return MSR_F15H_PERF_CTL;
-        } else {
-                if (type == PMU_TYPE_COUNTER)
-                        return MSR_K7_PERFCTR0;
-                else
-                        return MSR_K7_EVNTSEL0;
-        }
-}
-
 static enum index msr_to_index(u32 msr)
 {
         switch (msr) {
@@ -141,18 +124,12 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 
 static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
-        unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
-        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+        unsigned int num_counters = pmu->nr_arch_gp_counters;
 
-        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-                /*
-                 * The idx is contiguous. The MSRs are not. The counter MSRs
-                 * are interleaved with the event select MSRs.
-                 */
-                pmc_idx *= 2;
-        }
+        if (pmc_idx >= num_counters)
+                return NULL;
 
-        return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
+        return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
 }
 
 static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
@@ -168,15 +145,7 @@ static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
                                             unsigned int idx, u64 *mask)
 {
-        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-        struct kvm_pmc *counters;
-
-        idx &= ~(3u << 30);
-        if (idx >= pmu->nr_arch_gp_counters)
-                return NULL;
-        counters = pmu->gp_counters;
-
-        return &counters[idx];
+        return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
 }
 
 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
