KVM: selftests: Add PMU feature framework, use in PMU event filter test
Add an X86_PMU_FEATURE_* framework to simplify probing architectural
events on Intel PMUs, which require checking the length of a bit vector
and the _absence_ of a "feature" bit.  Add helpers for both KVM and
"this CPU", and use the newfangled magic (along with X86_PROPERTY_*)
to clean up pmu_event_filter_test.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006005125.680782-10-seanjc@google.com
Sean Christopherson committed Nov 17, 2022
1 parent 4feb9d2 commit 5228c02
Showing 2 changed files with 48 additions and 44 deletions.
41 changes: 41 additions & 0 deletions tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -201,6 +201,8 @@ struct kvm_x86_cpu_property {

#define X86_PROPERTY_MAX_BASIC_LEAF KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)

#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
@@ -221,6 +223,29 @@ struct kvm_x86_cpu_property {

#define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

/*
* Intel's architectural PMU events are bizarre. They have a "feature" bit
* that indicates the feature is _not_ supported, and a property that states
* the length of the bit mask of unsupported features. A feature is supported
* if the size of the bit mask is larger than the "unavailable" bit, and said
* bit is not set.
*
* Wrap the "unavailable" feature to simplify checking whether or not a given
* architectural event is supported.
*/
struct kvm_x86_pmu_feature {
struct kvm_x86_cpu_feature anti_feature;
};
#define KVM_X86_PMU_FEATURE(name, __bit) \
({ \
struct kvm_x86_pmu_feature feature = { \
.anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit), \
}; \
\
feature; \
})

#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(BRANCH_INSNS_RETIRED, 5)

/* Page table bitfield declarations */
#define PTE_PRESENT_MASK BIT_ULL(0)
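
To make the "anti-feature" rule above concrete, here is a hedged, standalone sketch of the same check that KVM_X86_PMU_FEATURE() and the *_pmu_has() helpers encode. It is not part of the commit and uses plain userspace C with the compiler's <cpuid.h> rather than the selftest framework:

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * An architectural event is supported iff its bit index is below the
 * EBX bit-vector length reported in CPUID.0AH:EAX[31:24] AND the
 * corresponding "event unavailable" bit in CPUID.0AH:EBX is clear.
 */
static bool arch_event_is_supported(unsigned int bit)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() fails if leaf 0xa is beyond the max basic leaf. */
	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
		return false;

	return bit < ((eax >> 24) & 0xff) && !(ebx & (1u << bit));
}

int main(void)
{
	/* Bit 5 is the "branch instructions retired" architectural event. */
	printf("branch insns retired: %s\n",
	       arch_event_is_supported(5) ? "supported" : "unsupported");
	return 0;
}
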
@@ -535,6 +560,14 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

return nr_bits > feature.anti_feature.bit &&
!this_cpu_has(feature.anti_feature);
}

#define SET_XMM(__var, __xmm) \
asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)

@@ -743,6 +776,14 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

return nr_bits > feature.anti_feature.bit &&
!kvm_cpu_has(feature.anti_feature);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
return sizeof(struct kvm_cpuid2) +
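The commit message's "helpers for both KVM and 'this CPU'" correspond to kvm_pmu_has() and this_pmu_has() above. As a rough usage sketch, not part of this commit: the VM scaffolding follows the selftests' usual boilerplate and ucall/exit handling is omitted for brevity.

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

static void guest_code(void)
{
	/*
	 * this_pmu_has() runs in the guest and reads CPUID directly, so it
	 * reflects what the vCPU actually sees.
	 */
	GUEST_ASSERT(this_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED));
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/*
	 * kvm_pmu_has() queries KVM's supported CPUID from the host, so the
	 * test can skip itself before creating a VM.
	 */
	TEST_REQUIRE(kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_run(vcpu);
	kvm_vm_free(vm);
	return 0;
}
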
51 changes: 7 additions & 44 deletions tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -21,29 +21,6 @@
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)

union cpuid10_eax {
struct {
unsigned int version_id:8;
unsigned int num_counters:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
unsigned int full;
};

union cpuid10_ebx {
struct {
unsigned int no_unhalted_core_cycles:1;
unsigned int no_instructions_retired:1;
unsigned int no_unhalted_reference_cycles:1;
unsigned int no_llc_reference:1;
unsigned int no_llc_misses:1;
unsigned int no_branch_instruction_retired:1;
unsigned int no_branch_misses_retired:1;
} split;
unsigned int full;
};

/* End of stuff taken from perf_event.h. */

/* Oddly, this isn't in perf_event.h. */
@@ -380,30 +357,16 @@ static void test_pmu_config_disable(void (*guest_code)(void))
}

/*
* Check for a non-zero PMU version, at least one general-purpose
* counter per logical processor, an EBX bit vector of length greater
* than 5, and EBX[5] clear.
*/
static bool check_intel_pmu_leaf(const struct kvm_cpuid_entry2 *entry)
{
union cpuid10_eax eax = { .full = entry->eax };
union cpuid10_ebx ebx = { .full = entry->ebx };

return eax.split.version_id && eax.split.num_counters > 0 &&
eax.split.mask_length > ARCH_PERFMON_BRANCHES_RETIRED &&
!ebx.split.no_branch_instruction_retired;
}

/*
* Note that CPUID leaf 0xa is Intel-specific. This leaf should be
* clear on AMD hardware.
* On Intel, check for a non-zero PMU version, at least one general-purpose
* counter per logical processor, and support for counting the number of branch
* instructions retired.
*/
static bool use_intel_pmu(void)
{
const struct kvm_cpuid_entry2 *entry;

entry = kvm_get_supported_cpuid_entry(0xa);
return is_intel_cpu() && check_intel_pmu_leaf(entry);
return is_intel_cpu() &&
kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
}
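
For context, a hedged paraphrase (not part of this diff, and simplified) of how the test's main() consumes the helper; the workload names intel_guest_code/amd_guest_code are approximations of the test's actual guest functions:

	void (*guest_code)(void);

	/* Skip unless one of the two supported PMU flavors is usable. */
	TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
	guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;

	/* ... VM creation and the event filter tests follow ... */
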

static bool is_zen1(uint32_t eax)