Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "ARM:

   - Avoid use of uninitialized memcache pointer in user_mem_abort()

   - Always set HCR_EL2.xMO bits when running in VHE, allowing
     interrupts to be taken while TGE=0 and fixing an ugly bug on
     AmpereOne that occurs when taking an interrupt while clearing the
     xMO bits (AC03_CPU_36)

   - Prevent VMMs from hiding support for AArch64 at any EL virtualized
     by KVM

   - Save/restore the host value for HCRX_EL2 instead of restoring an
     incorrect fixed value

   - Make host_stage2_set_owner_locked() check that the entire requested
     range is memory rather than just the first page

  RISC-V:

   - Add missing reset of smstateen CSRs

  x86:

   - Forcibly leave SMM on SHUTDOWN interception on AMD CPUs to avoid
     causing problems due to KVM stuffing INIT on SHUTDOWN (KVM needs to
     sanitize the VMCB as its state is undefined after SHUTDOWN;
     emulating INIT is the least awful choice).

   - Track the valid sync/dirty fields in kvm_run as a u64 to ensure
     KVM doesn't goof a sanity check in the future.

   - Free obsolete roots when (re)loading the MMU to fix a bug where
     pre-faulting memory can get stuck due to always encountering a
     stale root.

   - When dumping GHCB state, use KVM's snapshot instead of the raw GHCB
     page to print state, so that KVM doesn't print stale/wrong
     information.

   - When changing memory attributes (e.g. shared <=> private), add
     potential hugepage ranges to the mmu_invalidate_range_{start,end}
     set so that KVM doesn't create a shared/private hugepage when the
     corresponding attributes will become mixed (the attributes are
     committed *after* KVM finishes the invalidation).

   - Rework the SRSO mitigation to enable BP_SPEC_REDUCE only when KVM
     has at least one active VM. Previously, BP_SPEC_REDUCE was
     effectively enabled whenever KVM was loaded, which led to very
     measurable performance regressions for non-KVM workloads"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SVM: Set/clear SRSO's BP_SPEC_REDUCE on 0 <=> 1 VM count transitions
  KVM: arm64: Fix memory check in host_stage2_set_owner_locked()
  KVM: arm64: Kill HCRX_HOST_FLAGS
  KVM: arm64: Properly save/restore HCRX_EL2
  KVM: arm64: selftest: Don't try to disable AArch64 support
  KVM: arm64: Prevent userspace from disabling AArch64 support at any virtualisable EL
  KVM: arm64: Force HCR_EL2.xMO to 1 at all times in VHE mode
  KVM: arm64: Fix uninitialized memcache pointer in user_mem_abort()
  KVM: x86/mmu: Prevent installing hugepages when mem attributes are changing
  KVM: SVM: Update dump_ghcb() to use the GHCB snapshot fields
  KVM: RISC-V: reset smstateen CSRs
  KVM: x86/mmu: Check and free obsolete roots in kvm_mmu_reload()
  KVM: x86: Check that the high 32bits are clear in kvm_arch_vcpu_ioctl_run()
  KVM: SVM: Forcibly leave SMM mode on SHUTDOWN interception
Linus Torvalds committed May 11, 2025
2 parents ecb9194 + add2032 commit cd802e7
Showing 16 changed files with 200 additions and 72 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/el2_setup.h
@@ -52,7 +52,7 @@
mrs x0, id_aa64mmfr1_el1
ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x0, .Lskip_hcrx_\@
- mov_q x0, HCRX_HOST_FLAGS
+ mov_q x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

/* Enable GCS if supported */
mrs_s x1, SYS_ID_AA64PFR1_EL1
3 changes: 1 addition & 2 deletions arch/arm64/include/asm/kvm_arm.h
@@ -100,9 +100,8 @@
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
- #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+ #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)

- #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define MPAMHCR_HOST_FLAGS 0

/* TCR_EL2 Registers bits */
13 changes: 6 additions & 7 deletions arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void)

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);

@@ -245,11 +247,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* EL1 instead of being trapped to EL2.
*/
if (system_supports_pmuv3()) {
- struct kvm_cpu_context *hctxt;
-
write_sysreg(0, pmselr_el0);

- hctxt = host_data_ptr(host_ctxt);
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -269,6 +268,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
hcrx &= ~clr;
}

+ ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
write_sysreg_s(hcrx, SYS_HCRX_EL2);
}

@@ -278,19 +278,18 @@

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);

write_sysreg(0, hstr_el2);
if (system_supports_pmuv3()) {
- struct kvm_cpu_context *hctxt;
-
- hctxt = host_data_ptr(host_ctxt);
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}

if (cpus_have_final_cap(ARM64_HAS_HCX))
- write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);

__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();
2 changes: 1 addition & 1 deletion arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
int ret;

- if (!addr_is_memory(addr))
+ if (!range_is_memory(addr, addr + size))
return -EPERM;

ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
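For illustration, why checking only the first page was insufficient (a
sketch of the intended semantics, not the actual range_is_memory()
implementation): a range whose first page is RAM can still extend past the
end of memory, so every page in [addr, addr + size) has to be validated.

/* Sketch: reject the range if any page in it is not memory. */
static bool range_is_memory_sketch(phys_addr_t addr, phys_addr_t end)
{
	for (; addr < end; addr += PAGE_SIZE)
		if (!addr_is_memory(addr))
			return false;
	return true;
}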
36 changes: 21 additions & 15 deletions arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -429,35 +429,41 @@ u64 __vgic_v3_get_gic_config(void)
/*
* To check whether we have a MMIO-based (GICv2 compatible)
* CPU interface, we need to disable the system register
- * view. To do that safely, we have to prevent any interrupt
- * from firing (which would be deadly).
+ * view.
*
- * Note that this only makes sense on VHE, as interrupts are
- * already masked for nVHE as part of the exception entry to
- * EL2.
- */
- if (has_vhe())
- flags = local_daif_save();
-
- /*
* Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
* that to be able to set ICC_SRE_EL1.SRE to 0, all the
* interrupt overrides must be set. You've got to love this.
+ *
+ * As we always run VHE with HCR_xMO set, no extra xMO
+ * manipulation is required in that case.
+ *
+ * To safely disable SRE, we have to prevent any interrupt
+ * from firing (which would be deadly). This only makes sense
+ * on VHE, as interrupts are already masked for nVHE as part
+ * of the exception entry to EL2.
*/
- sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
- isb();
+ if (has_vhe()) {
+ flags = local_daif_save();
+ } else {
+ sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+ isb();
+ }

write_gicreg(0, ICC_SRE_EL1);
isb();

val = read_gicreg(ICC_SRE_EL1);

write_gicreg(sre, ICC_SRE_EL1);
isb();
- sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
- isb();

- if (has_vhe())
+ if (has_vhe()) {
local_daif_restore(flags);
+ } else {
+ sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+ isb();
+ }

val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
val |= read_gicreg(ICH_VTR_EL2);
13 changes: 8 additions & 5 deletions arch/arm64/kvm/mmu.c
@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}

+ if (!is_protected_kvm_enabled())
+ memcache = &vcpu->arch.mmu_page_cache;
+ else
+ memcache = &vcpu->arch.pkvm_memcache;
+
/*
* Permission faults just need to update the existing leaf entry,
* and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (!fault_is_perm || (logging_active && write_fault)) {
int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);

- if (!is_protected_kvm_enabled()) {
- memcache = &vcpu->arch.mmu_page_cache;
+ if (!is_protected_kvm_enabled())
ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
- } else {
- memcache = &vcpu->arch.pkvm_memcache;
+ else
ret = topup_hyp_memcache(memcache, min_pages);
- }
+
if (ret)
return ret;
}
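The bug being fixed, reduced to its shape (a simplified sketch, not the
kernel code): the pointer was only assigned inside the top-up branch, so a
permission fault that skipped that branch could later dereference an
uninitialized pointer. Hoisting the assignment above the branch, as the
hunk above does, makes every path see a valid pointer.

/* Buggy shape (before the fix): */
void *memcache;                            /* uninitialized */
if (!fault_is_perm) {                      /* skipped on permission faults */
	memcache = &vcpu->arch.mmu_page_cache;
	ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
}
/* ... later users of memcache read garbage on the skipped path */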
6 changes: 6 additions & 0 deletions arch/arm64/kvm/sys_regs.c
@@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

+ /* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
+ if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
+ !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
+ (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
+ return -EINVAL;
+
return set_id_reg(vcpu, rd, user_val);
}
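To make the rejected case concrete (illustrative register values only):
ID_AA64PFR0_EL1.EL0 lives in bits [3:0] and EL1 in bits [7:4], where 0
means AArch64 is not implemented at that exception level.

u64 ok_val  = 0x11;   /* EL0 = 1, EL1 = 1: AArch64 at both levels, accepted */
u64 bad_val = 0x01;   /* EL0 = 1, EL1 = 0: claims no AArch64 at EL1,
                       * now rejected with -EINVAL */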

2 changes: 2 additions & 0 deletions arch/riscv/kvm/vcpu.c
@@ -77,6 +77,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
memcpy(cntx, reset_cntx, sizeof(*cntx));
spin_unlock(&vcpu->arch.reset_cntx_lock);

+ memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
kvm_riscv_vcpu_fp_reset(vcpu);

kvm_riscv_vcpu_vector_reset(vcpu);
3 changes: 3 additions & 0 deletions arch/x86/kvm/mmu.h
@@ -104,6 +104,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
+ if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
+ kvm_mmu_free_obsolete_roots(vcpu);
+
/*
* Checking root.hpa is sufficient even when KVM has mirror root.
* We can have either:
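For context, a sketch of how the new request pairs with the check above
(illustrative; the request is raised by the paths that obsolete roots):

/* elsewhere, when a vCPU's root becomes obsolete: */
kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);

/* ...so the next kvm_mmu_reload() before guest entry frees the stale
 * root and loads a fresh one, instead of repeatedly seeing a
 * valid-looking root.hpa and getting stuck (the pre-fault hang). */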
70 changes: 54 additions & 16 deletions arch/x86/kvm/mmu/mmu.c
@@ -5974,6 +5974,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
+ EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
@@ -7669,9 +7670,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
}

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+ {
+ return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+ }
+
+ static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+ {
+ lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+ }
+
+ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
+ {
+ lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+ }

bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{
+ struct kvm_memory_slot *slot = range->slot;
+ int level;
+
/*
* Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
* supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
@@ -7686,6 +7708,38 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
return false;

+ if (WARN_ON_ONCE(range->end <= range->start))
+ return false;
+
+ /*
+ * If the head and tail pages of the range currently allow a hugepage,
+ * i.e. reside fully in the slot and don't have mixed attributes, then
+ * add each corresponding hugepage range to the ongoing invalidation,
+ * e.g. to prevent KVM from creating a hugepage in response to a fault
+ * for a gfn whose attributes aren't changing. Note, only the range
+ * of gfns whose attributes are being modified needs to be explicitly
+ * unmapped, as that will unmap any existing hugepages.
+ */
+ for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+ gfn_t start = gfn_round_for_level(range->start, level);
+ gfn_t end = gfn_round_for_level(range->end - 1, level);
+ gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+ if ((start != range->start || start + nr_pages > range->end) &&
+ start >= slot->base_gfn &&
+ start + nr_pages <= slot->base_gfn + slot->npages &&
+ !hugepage_test_mixed(slot, start, level))
+ kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+ if (end == start)
+ continue;
+
+ if ((end + nr_pages) > range->end &&
+ (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+ !hugepage_test_mixed(slot, end, level))
+ kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+ }
+
/* Unmap the old attribute page. */
if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
range->attr_filter = KVM_FILTER_SHARED;
@@ -7695,23 +7749,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
return kvm_unmap_gfn_range(kvm, range);
}

- static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
- {
- return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
- }
-
- static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
- {
- lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
- }
-
- static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
- {
- lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
- }

static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, int level, unsigned long attrs)
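A worked example of the head/tail arithmetic in the loop above
(illustrative GFNs at the 2MiB level, where KVM_PAGES_PER_HPAGE() is
512 = 0x200):

/* range = [0x300, 0x500), nr_pages = 0x200 */
gfn_t head = gfn_round_for_level(0x300, PG_LEVEL_2M);      /* 0x200 */
/* head != range->start, so hugepage [0x200, 0x400) straddles the
 * boundary and is added to the invalidation (if it sits fully inside
 * the slot and isn't already mixed). */
gfn_t tail = gfn_round_for_level(0x500 - 1, PG_LEVEL_2M);  /* 0x400 */
/* tail + nr_pages = 0x600 > range->end, so [0x400, 0x600) is added as
 * well; either hugepage could otherwise be installed while the
 * attributes are only partially updated. */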
1 change: 1 addition & 0 deletions arch/x86/kvm/smm.c
@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)

kvm_mmu_reset_context(vcpu);
}
+ EXPORT_SYMBOL_GPL(kvm_smm_changed);

void process_smi(struct kvm_vcpu *vcpu)
{
32 changes: 19 additions & 13 deletions arch/x86/kvm/svm/sev.c
@@ -3173,9 +3173,14 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
kvfree(svm->sev_es.ghcb_sa);
}

+ static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+ {
+ return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+ }
+
static void dump_ghcb(struct vcpu_svm *svm)
{
- struct ghcb *ghcb = svm->sev_es.ghcb;
+ struct vmcb_control_area *control = &svm->vmcb->control;
unsigned int nbits;

/* Re-use the dump_invalid_vmcb module parameter */
@@ -3184,18 +3189,24 @@ static void dump_ghcb(struct vcpu_svm *svm)
return;
}

- nbits = sizeof(ghcb->save.valid_bitmap) * 8;
+ nbits = sizeof(svm->sev_es.valid_bitmap) * 8;

pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
/*
* Print KVM's snapshot of the GHCB values that were (unsuccessfully)
* used to handle the exit. If the guest has since modified the GHCB
* itself, dumping the raw GHCB won't help debug why KVM was unable to
* handle the VMGEXIT that KVM observed.
*/
pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
- ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
+ kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
- ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
+ control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
- ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
+ control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
- ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
- pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
+ svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
+ pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
@@ -3266,11 +3277,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

- static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
- {
- return (((u64)control->exit_code_hi) << 32) | control->exit_code;
- }
-
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
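A small worked example of the 64-bit exit-code reconstruction above
(illustrative values only):

struct vmcb_control_area c = {
	.exit_code    = 0x80000002,
	.exit_code_hi = 0x00000001,
};
u64 code = kvm_ghcb_get_sw_exit_code(&c);  /* 0x0000000180000002 */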