Merge tag 'kvm-x86-fixes-6.9-rcN' of https://github.com/kvm-x86/linux into HEAD

- Fix a mostly benign bug in the gfn_to_pfn_cache infrastructure where KVM
  would allow userspace to refresh the cache with a bogus GPA.  The bug has
  existed for quite some time, but was exposed by a new sanity check added in
  6.9 (to ensure a cache is either GPA-based or HVA-based).

- Drop an unused param from gfn_to_pfn_cache_invalidate_start() that got left
  behind during a 6.9 cleanup.

- Disable support for virtualizing adaptive PEBS, as KVM's implementation is
  architecturally broken and can leak host LBRs to the guest.

- Fix a bug where KVM neglects to set the enable bits for general purpose
  counters in PERF_GLOBAL_CTRL when initializing the virtual PMU.  Both Intel
  and AMD architectures require the bits to be set at RESET in order for v2
  PMUs to be backwards compatible with software that was written for v1 PMUs,
  i.e. for software that will never manually set the global enables.

- Disable LBR virtualization on CPUs that don't support LBR callstacks, as
  KVM unconditionally uses PERF_SAMPLE_BRANCH_CALL_STACK when creating the
  virtual LBR perf event, i.e. KVM will always fail to create LBR events on
  such CPUs.

- Fix a math goof in x86's hugepage logic for KVM_SET_MEMORY_ATTRIBUTES that
  results in an array overflow (detected by KASAN); a simplified sketch of the
  required range check follows this list.

- Fix a flaw in the max_guest_memory selftest that results in it exhausting
  the supply of ucall structures when run with more than 256 vCPUs.

- Mark KVM_MEM_READONLY as supported for RISC-V in set_memory_region_test.

- Fix a bug where KVM incorrectly thinks a TDP MMU root is an indirect shadow
  root due to KVM unnecessarily clobbering root_role.direct when userspace sets
  guest CPUID.

- Fix a dirty logging bug where KVM fails to write-protect TDP MMU SPTEs used
  for L2 if Page-Modification Logging is enabled for L1 and the L1 hypervisor
  is NOT using EPT (if nEPT is enabled, KVM doesn't use the TDP MMU to run L2).
  For simplicity, KVM always disables PML when running L2, but the TDP MMU
  wasn't accounting for root-specific conditions that force write-protect based
  dirty logging.
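
As a quick illustration of the hugepage fix above: the attribute-tracking path
must only treat a hugepage-sized GFN range as belonging to a memslot if the
range is fully contained in it.  A minimal standalone sketch of that
containment check (illustrative only; struct memslot and range_in_slot() are
made-up stand-ins, not KVM code):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Made-up stand-in for a memslot: base_gfn is the first guest frame it covers. */
struct memslot {
        gfn_t base_gfn;
        uint64_t npages;
};

/*
 * Checking only the lower bound (gfn >= base_gfn) lets a range that starts
 * inside the slot but ends past it index beyond the slot's metadata arrays,
 * which is the overflow KASAN caught.  Both bounds must be checked.
 */
static bool range_in_slot(const struct memslot *slot, gfn_t gfn, uint64_t nr_pages)
{
        return gfn >= slot->base_gfn &&
               gfn + nr_pages <= slot->base_gfn + slot->npages;
}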
Paolo Bonzini committed Apr 16, 2024
2 parents 49ff3b4 + eefb85b commit 1c3bed8
Showing 15 changed files with 194 additions and 89 deletions.
1 change: 1 addition & 0 deletions arch/x86/events/intel/lbr.c
@@ -1693,6 +1693,7 @@ void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
lbr->from = x86_pmu.lbr_from;
lbr->to = x86_pmu.lbr_to;
lbr->info = x86_pmu.lbr_info;
lbr->has_callstack = x86_pmu_has_lbr_callstack();
}
EXPORT_SYMBOL_GPL(x86_perf_get_lbr);

1 change: 1 addition & 0 deletions arch/x86/include/asm/perf_event.h
@@ -555,6 +555,7 @@ struct x86_pmu_lbr {
unsigned int from;
unsigned int to;
unsigned int info;
bool has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
9 changes: 5 additions & 4 deletions arch/x86/kvm/mmu/mmu.c
@@ -5576,9 +5576,9 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
* that problem is swept under the rug; KVM's CPUID API is horrific and
* it's all but impossible to solve it without introducing a new API.
*/
vcpu->arch.root_mmu.root_role.word = 0;
vcpu->arch.guest_mmu.root_role.word = 0;
vcpu->arch.nested_mmu.root_role.word = 0;
vcpu->arch.root_mmu.root_role.invalid = 1;
vcpu->arch.guest_mmu.root_role.invalid = 1;
vcpu->arch.nested_mmu.root_role.invalid = 1;
vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
@@ -7399,7 +7399,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* by the memslot, KVM can't use a hugepage due to the
* misaligned address regardless of memory attributes.
*/
if (gfn >= slot->base_gfn) {
if (gfn >= slot->base_gfn &&
gfn + nr_pages <= slot->base_gfn + slot->npages) {
if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
hugepage_clear_mixed(slot, gfn, level);
else
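Editor's note on the root_role hunk above: kvm_mmu_page_role is a union of a
raw word and bitfields, so zeroing the word wipes every field, including
direct, while setting only invalid leaves the rest intact.  A toy model of
that difference (simplified layout, not KVM's real union):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for KVM's kvm_mmu_page_role union; the real one has many more fields. */
union toy_role {
        uint32_t word;
        struct {
                uint32_t level:4;
                uint32_t direct:1;
                uint32_t invalid:1;
        };
};

int main(void)
{
        union toy_role a = {0}, b;

        a.level = 4;
        a.direct = 1;
        b = a;

        a.word = 0;     /* old approach: clobbers 'direct' along with everything else */
        b.invalid = 1;  /* new approach: only marks the role invalid, 'direct' survives */

        printf("word = 0    -> direct=%u\n", a.direct);        /* prints 0 */
        printf("invalid = 1 -> direct=%u\n", b.direct);        /* prints 1 */
        return 0;
}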
51 changes: 22 additions & 29 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -1548,17 +1548,21 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
}
}

/*
* Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
* AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
* If AD bits are not enabled, this will require clearing the writable bit on
* each SPTE. Returns true if an SPTE has been changed and the TLBs need to
* be flushed.
*/
static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
{
/*
* All TDP MMU shadow pages share the same role as their root, aside
* from level, so it is valid to key off any shadow page to determine if
* write protection is needed for an entire tree.
*/
return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
}

static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end)
{
u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
struct tdp_iter iter;
bool spte_set = false;

@@ -1573,7 +1577,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;

KVM_MMU_WARN_ON(kvm_ad_enabled() &&
KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));

if (!(iter.old_spte & dbit))
@@ -1590,11 +1594,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
}

/*
* Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
* AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
* If AD bits are not enabled, this will require clearing the writable bit on
* each SPTE. Returns true if an SPTE has been changed and the TLBs need to
* be flushed.
* Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
* memslot. Returns true if an SPTE has been changed and the TLBs need to be
* flushed.
*/
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
const struct kvm_memory_slot *slot)
@@ -1610,18 +1612,11 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
return spte_set;
}

/*
* Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
* set in mask, starting at gfn. The given memslot is expected to contain all
* the GFNs represented by set bits in the mask. If AD bits are enabled,
* clearing the dirty status will involve clearing the dirty bit on each SPTE
* or, if AD bits are not enabled, clearing the writable bit on each SPTE.
*/
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t gfn, unsigned long mask, bool wrprot)
{
u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
struct tdp_iter iter;

lockdep_assert_held_write(&kvm->mmu_lock);
@@ -1633,7 +1628,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
if (!mask)
break;

KVM_MMU_WARN_ON(kvm_ad_enabled() &&
KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));

if (iter.level > PG_LEVEL_4K ||
@@ -1659,11 +1654,9 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
}

/*
* Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
* set in mask, starting at gfn. The given memslot is expected to contain all
* the GFNs represented by set bits in the mask. If AD bits are enabled,
* clearing the dirty status will involve clearing the dirty bit on each SPTE
* or, if AD bits are not enabled, clearing the writable bit on each SPTE.
* Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
* which a bit is set in mask, starting at gfn. The given memslot is expected to
* contain all the GFNs represented by set bits in the mask.
*/
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
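To restate the idea behind tdp_mmu_need_write_protect() above: with A/D-bit
based dirty logging only the Dirty bit is cleared and the SPTE stays writable,
whereas write-protect based logging — which an L2-only root needs when L1 has
PML enabled without nEPT — clears the Writable bit so the next guest write
faults and gets logged.  A simplified restatement of the mask selection
(illustrative bit positions, not KVM's spte.h definitions):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative SPTE bits; the real values differ between legacy paging and EPT. */
#define SPTE_WRITABLE   (1ULL << 1)
#define SPTE_DIRTY      (1ULL << 6)

/*
 * Pick which bit to clear when harvesting dirty state for a tree of SPTEs:
 * if the tree must use write-protect based logging, clear Writable so the
 * next write faults; otherwise just clear Dirty and leave the SPTE writable.
 */
static uint64_t dirty_clear_mask(bool need_write_protect)
{
        return need_write_protect ? SPTE_WRITABLE : SPTE_DIRTY;
}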
16 changes: 14 additions & 2 deletions arch/x86/kvm/pmu.c
@@ -775,8 +775,20 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->pebs_data_cfg_mask = ~0ull;
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);

if (vcpu->kvm->arch.enable_pmu)
static_call(kvm_x86_pmu_refresh)(vcpu);
if (!vcpu->kvm->arch.enable_pmu)
return;

static_call(kvm_x86_pmu_refresh)(vcpu);

/*
* At RESET, both Intel and AMD CPUs set all enable bits for general
* purpose counters in IA32_PERF_GLOBAL_CTRL (so that software that
* was written for v1 PMUs doesn't unknowingly leave GP counters disabled
* in the global controls). Emulate that behavior when refreshing the
* PMU so that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
*/
if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
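For reference on the GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0) value
programmed above: it is simply the low n bits set, i.e. one enable bit per GP
counter.  A plain-C sketch of the same computation (standalone, not using the
kernel's GENMASK macro):

#include <stdint.h>
#include <stdio.h>

/* Low-n-bits equivalent of GENMASK_ULL(n - 1, 0), guarding the n == 0 and n >= 64 edges. */
static uint64_t gp_reset_global_ctrl(unsigned int nr_gp_counters)
{
        if (nr_gp_counters == 0)
                return 0;       /* no GP counters: PERF_GLOBAL_CTRL stays clear */
        if (nr_gp_counters >= 64)
                return ~0ULL;
        return (1ULL << nr_gp_counters) - 1;
}

int main(void)
{
        /* e.g. 8 GP counters -> 0xff, which is what the selftest below asserts */
        printf("0x%llx\n", (unsigned long long)gp_reset_global_ctrl(8));
        return 0;
}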
2 changes: 1 addition & 1 deletion arch/x86/kvm/vmx/pmu_intel.c
@@ -535,7 +535,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
perf_capabilities = vcpu_get_perf_capabilities(vcpu);
if (cpuid_model_is_consistent(vcpu) &&
(perf_capabilities & PMU_CAP_LBR_FMT))
x86_perf_get_lbr(&lbr_desc->records);
memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
else
lbr_desc->records.nr = 0;

41 changes: 35 additions & 6 deletions arch/x86/kvm/vmx/vmx.c
@@ -218,6 +218,8 @@ module_param(ple_window_max, uint, 0444);
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);

struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;

static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);
@@ -7862,10 +7864,9 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx_update_exception_bitmap(vcpu);
}

static u64 vmx_get_perf_capabilities(void)
static __init u64 vmx_get_perf_capabilities(void)
{
u64 perf_cap = PMU_CAP_FW_WRITES;
struct x86_pmu_lbr lbr;
u64 host_perf_cap = 0;

if (!enable_pmu)
@@ -7875,15 +7876,43 @@ static u64 vmx_get_perf_capabilities(void)
rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);

if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
x86_perf_get_lbr(&lbr);
if (lbr.nr)
x86_perf_get_lbr(&vmx_lbr_caps);

/*
* KVM requires LBR callstack support, as the overhead due to
* context switching LBRs without said support is too high.
* See intel_pmu_create_guest_lbr_event() for more info.
*/
if (!vmx_lbr_caps.has_callstack)
memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
else if (vmx_lbr_caps.nr)
perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
}

if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
perf_cap &= ~PERF_CAP_PEBS_BASELINE;

/*
* Disallow adaptive PEBS as it is functionally broken, can be
* used by the guest to read *host* LBRs, and can be used to
* bypass userspace event filters. To correctly and safely
* support adaptive PEBS, KVM needs to:
*
* 1. Account for the ADAPTIVE flag when (re)programming fixed
* counters.
*
* 2. Gain support from perf (or take direct control of counter
* programming) to support events without adaptive PEBS
* enabled for the hardware counter.
*
* 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
* adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
*
* 4. Document which PMU events are effectively exposed to the
* guest via adaptive PEBS, and make adaptive PEBS mutually
* exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
*/
perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}

return perf_cap;
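The PEBS and LBR gating above keys off a handful of IA32_PERF_CAPABILITIES
fields.  A small decoder sketch for the bits involved (bit positions as
assumed here; verify against arch/x86/include/asm/msr-index.h before relying
on them):

#include <stdint.h>
#include <stdio.h>

/* Assumed IA32_PERF_CAPABILITIES field layout; double-check against msr-index.h. */
#define CAP_LBR_FMT             0x3fULL         /* bits 5:0  - LBR record format  */
#define CAP_PEBS_FORMAT         0xf00ULL        /* bits 11:8 - PEBS record format */
#define CAP_FW_WRITES           (1ULL << 13)    /* full-width counter writes      */
#define CAP_PEBS_BASELINE       (1ULL << 14)    /* adaptive ("baseline") PEBS     */

static void decode_perf_caps(uint64_t caps)
{
        printf("LBR format:    %llu\n", (unsigned long long)(caps & CAP_LBR_FMT));
        printf("PEBS format:   %llu\n", (unsigned long long)((caps & CAP_PEBS_FORMAT) >> 8));
        printf("FW writes:     %s\n", (caps & CAP_FW_WRITES) ? "yes" : "no");
        /* With the hunk above, a guest should now always see this bit clear. */
        printf("PEBS baseline: %s\n", (caps & CAP_PEBS_BASELINE) ? "yes" : "no");
}

int main(void)
{
        decode_perf_caps(1ULL << 13);   /* example: only full-width writes advertised */
        return 0;
}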
6 changes: 5 additions & 1 deletion arch/x86/kvm/vmx/vmx.h
@@ -15,6 +15,7 @@
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
#include "../mmu.h"

#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
@@ -109,6 +110,8 @@ struct lbr_desc {
bool msr_passthrough;
};

extern struct x86_pmu_lbr vmx_lbr_caps;

/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -719,7 +722,8 @@ static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
if (!enable_ept)
return true;

return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
return allow_smaller_maxphyaddr &&
cpuid_maxphyaddr(vcpu) < kvm_get_shadow_phys_bits();
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
15 changes: 6 additions & 9 deletions tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -22,10 +22,11 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
uint64_t gpa;

for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
*((volatile uint64_t *)gpa) = gpa;

GUEST_DONE();
for (;;) {
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
*((volatile uint64_t *)gpa) = gpa;
GUEST_SYNC(0);
}
}

struct vcpu_info {
Expand Down Expand Up @@ -55,7 +56,7 @@ static void rendezvous_with_boss(void)
static void run_vcpu(struct kvm_vcpu *vcpu)
{
vcpu_run(vcpu);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
}

static void *vcpu_worker(void *data)
@@ -64,17 +65,13 @@ static void *vcpu_worker(void *data)
struct kvm_vcpu *vcpu = info->vcpu;
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
struct kvm_regs regs;

vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);

/* Snapshot regs before the first run. */
vcpu_regs_get(vcpu, &regs);
rendezvous_with_boss();

run_vcpu(vcpu);
rendezvous_with_boss();
vcpu_regs_set(vcpu, &regs);
vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
/* Toggle CR0.WP to trigger a MMU context reset. */
2 changes: 1 addition & 1 deletion tools/testing/selftests/kvm/set_memory_region_test.c
@@ -333,7 +333,7 @@ static void test_invalid_memory_region_flags(void)
struct kvm_vm *vm;
int r, i;

#if defined __aarch64__ || defined __x86_64__
#if defined __aarch64__ || defined __riscv || defined __x86_64__
supported_flags |= KVM_MEM_READONLY;
#endif

20 changes: 19 additions & 1 deletion tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -416,12 +416,30 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters

static void guest_test_gp_counters(void)
{
uint8_t pmu_version = guest_get_pmu_version();
uint8_t nr_gp_counters = 0;
uint32_t base_msr;

if (guest_get_pmu_version())
if (pmu_version)
nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);

/*
* For v2+ PMUs, PERF_GLOBAL_CTRL's architectural post-RESET value is
* "Sets bits n-1:0 and clears the upper bits", where 'n' is the number
* of GP counters. If there are no GP counters, require KVM to leave
* PERF_GLOBAL_CTRL '0'. This edge case isn't covered by the SDM, but
* follow the spirit of the architecture and only globally enable GP
* counters, of which there are none.
*/
if (pmu_version > 1) {
uint64_t global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL);

if (nr_gp_counters)
GUEST_ASSERT_EQ(global_ctrl, GENMASK_ULL(nr_gp_counters - 1, 0));
else
GUEST_ASSERT_EQ(global_ctrl, 0);
}

if (this_cpu_has(X86_FEATURE_PDCM) &&
rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
base_msr = MSR_IA32_PMC0;