KVM: x86: remove shadow_memtype_mask
The IGNORE_GUEST_PAT quirk is inapplicable, and thus always-disabled,
if shadow_memtype_mask is zero.  As long as vmx_get_mt_mask is not
called for the shadow paging case, there is no need to consult
shadow_memtype_mask and it can be removed altogether.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini committed Mar 14, 2025
1 parent c9c1e20 commit 3fee483
Showing 5 changed files with 7 additions and 34 deletions.
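
A note on why the diff below can call get_mt_mask unconditionally: the op is declared KVM_X86_OP_OPTIONAL_RET0 in arch/x86/include/asm/kvm-x86-ops.h, so a NULL vendor callback (SVM never provides one, and VMX now clears it when EPT is disabled) makes kvm_x86_call(get_mt_mask) return 0, leaving no memtype bits to OR into the SPTE. Below is a minimal standalone sketch of that "optional op defaults to 0" pattern; the struct and helper names are illustrative, standing in for the kernel's static-call machinery:

/* Sketch of KVM_X86_OP_OPTIONAL_RET0 semantics: a NULL callback
 * behaves as "return 0".  Names here are illustrative, not KVM's. */
#include <stdio.h>

typedef unsigned long long u64;

struct mt_ops {
	u64 (*get_mt_mask)(u64 gfn, int is_mmio);	/* NULL when EPT is off */
};

static u64 call_get_mt_mask(const struct mt_ops *ops, u64 gfn, int is_mmio)
{
	return ops->get_mt_mask ? ops->get_mt_mask(gfn, is_mmio) : 0;
}

int main(void)
{
	const struct mt_ops shadow_paging = { .get_mt_mask = NULL };
	u64 spte = 0x7;					/* pretend RWX bits are already set */

	spte |= call_get_mt_mask(&shadow_paging, 0, 0);	/* ORs in nothing */
	printf("spte = %#llx\n", spte);			/* prints 0x7 */
	return 0;
}
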
15 changes: 0 additions & 15 deletions arch/x86/kvm/mmu/mmu.c
@@ -4663,21 +4663,6 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
 }
 #endif
 
-bool kvm_mmu_may_ignore_guest_pat(struct kvm *kvm)
-{
-	/*
-	 * When EPT is enabled (shadow_memtype_mask is non-zero), and the VM
-	 * has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to
-	 * honor the memtype from the guest's PAT so that guest accesses to
-	 * memory that is DMA'd aren't cached against the guest's wishes.  As a
-	 * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
-	 * KVM _always_ ignores guest PAT, when EPT is enabled and when quirk
-	 * KVM_X86_QUIRK_IGNORE_GUEST_PAT is enabled or the CPU lacks the
-	 * ability to safely honor guest PAT.
-	 */
-	return kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT);
-}
-
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 #ifdef CONFIG_X86_64
19 changes: 2 additions & 17 deletions arch/x86/kvm/mmu/spte.c
@@ -37,7 +37,6 @@ u64 __read_mostly shadow_mmio_value;
 u64 __read_mostly shadow_mmio_mask;
 u64 __read_mostly shadow_mmio_access_mask;
 u64 __read_mostly shadow_present_mask;
-u64 __read_mostly shadow_memtype_mask;
 u64 __read_mostly shadow_me_value;
 u64 __read_mostly shadow_me_mask;
 u64 __read_mostly shadow_acc_track_mask;
@@ -203,9 +202,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (level > PG_LEVEL_4K)
 		spte |= PT_PAGE_SIZE_MASK;
 
-	if (shadow_memtype_mask)
-		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
-						  kvm_is_mmio_pfn(pfn));
+	spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else
@@ -460,13 +457,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 	/* VMX_EPT_SUPPRESS_VE_BIT is needed for W or X violation. */
 	shadow_present_mask =
 		(has_exec_only ? 0ull : VMX_EPT_READABLE_MASK) | VMX_EPT_SUPPRESS_VE_BIT;
-	/*
-	 * EPT overrides the host MTRRs, and so KVM must program the desired
-	 * memtype directly into the SPTEs.  Note, this mask is just the mask
-	 * of all bits that factor into the memtype, the actual memtype must be
-	 * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
-	 */
-	shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
 
 	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
 	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
 	shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;
@@ -518,12 +509,6 @@ void kvm_mmu_reset_all_pte_masks(void)
 	shadow_x_mask = 0;
 	shadow_present_mask = PT_PRESENT_MASK;
 
-	/*
-	 * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
-	 * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
-	 * correct memtype (WB is the "weakest" memtype).
-	 */
-	shadow_memtype_mask = 0;
 	shadow_acc_track_mask = 0;
 	shadow_me_mask = 0;
 	shadow_me_value = 0;
1 change: 0 additions & 1 deletion arch/x86/kvm/mmu/spte.h
@@ -187,7 +186,6 @@ extern u64 __read_mostly shadow_mmio_value;
 extern u64 __read_mostly shadow_mmio_mask;
 extern u64 __read_mostly shadow_mmio_access_mask;
 extern u64 __read_mostly shadow_present_mask;
-extern u64 __read_mostly shadow_memtype_mask;
 extern u64 __read_mostly shadow_me_value;
 extern u64 __read_mostly shadow_me_mask;

2 changes: 2 additions & 0 deletions arch/x86/kvm/vmx/vmx.c
@@ -8434,6 +8434,8 @@ __init int vmx_hardware_setup(void)
 	if (enable_ept)
 		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
 				      cpu_has_vmx_ept_execute_only());
+	else
+		vt_x86_ops.get_mt_mask = NULL;
 
 	/*
 	 * Setup shadow_me_value/shadow_me_mask to include MKTME KeyID
4 changes: 3 additions & 1 deletion arch/x86/kvm/x86.c
@@ -13567,8 +13567,10 @@ static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
 	 * due to toggling the "ignore PAT" bit.  Zap all SPTEs when the first
 	 * (or last) non-coherent device is (un)registered to so that new SPTEs
 	 * with the correct "ignore guest PAT" setting are created.
+	 *
+	 * If KVM always honors guest PAT, however, there is nothing to do.
 	 */
-	if (kvm_mmu_may_ignore_guest_pat(kvm))
+	if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT))
 		kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
 }

