diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3f47778490405..6a18660616f4a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4473,16 +4473,6 @@ static inline u64 reserved_hpa_bits(void)
 static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 					struct kvm_mmu *context)
 {
-	/*
-	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
-	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
-	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
-	 * The iTLB multi-hit workaround can be toggled at any time, so assume
-	 * NX can be used by any non-nested shadow MMU to avoid having to reset
-	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
-	 */
-	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
-
 	/* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
 	bool is_amd = true;
 	/* KVM doesn't use 2-level page tables for the shadow MMU. */
@@ -4494,7 +4484,8 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 
 	shadow_zero_check = &context->shadow_zero_check;
 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
-				context->shadow_root_level, uses_nx,
+				context->shadow_root_level,
+				context->mmu_role.base.efer_nx,
 				guest_can_use_gbpages(vcpu), is_pse, is_amd);
 
 	if (!shadow_me_mask)
@@ -4858,6 +4849,16 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
 	else
 		role.base.level = PT64_ROOT_4LEVEL;
 
+	/*
+	 * KVM forces EFER.NX=1 when TDP is disabled; reflect it in the MMU role.
+	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
+	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
+	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
+	 * The iTLB multi-hit workaround can be toggled at any time, so assume
+	 * NX can be used by any non-nested shadow MMU to avoid having to reset
+	 * MMU contexts.
+	 */
+	role.base.efer_nx = true;
 	return role;
 }
 
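
For reference, a minimal standalone sketch of the pattern the patch applies: derive the "can NX be used?" decision once, when the MMU role is computed, and have consumers such as reset_shadow_zero_bits_mask() read the cached role bit instead of recomputing it. The types and helpers below (mmu_context, calc_shadow_root_role(), reset_zero_bits_mask()) are simplified stand-ins invented for illustration, not KVM's actual structures:

/*
 * Minimal standalone sketch (NOT KVM's actual code) of the pattern the
 * patch applies: compute the NX decision once, when the MMU role is
 * calculated, and have every later consumer read the cached role bit
 * instead of re-deriving it.  All types and helpers here are invented
 * stand-ins for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static bool tdp_enabled;	/* module-wide knob, analogous to KVM's */

union mmu_role {
	struct {
		unsigned int efer_nx : 1;	/* effective EFER.NX */
		unsigned int level   : 4;	/* root page-table level */
	} base;
	unsigned int word;	/* whole role, comparable in one shot */
};

struct mmu_context {
	union mmu_role mmu_role;
};

/* Role calculation: the one place the NX decision is made. */
static union mmu_role calc_shadow_root_role(bool guest_efer_nx)
{
	union mmu_role role = { .word = 0 };

	role.base.level = 4;
	/*
	 * Fold the old "uses_nx = is_efer_nx() || !tdp_enabled" logic into
	 * the role; with shadow paging (!tdp_enabled) this is always true.
	 */
	role.base.efer_nx = guest_efer_nx || !tdp_enabled;
	return role;
}

/* Consumer: reads the cached bit instead of recomputing uses_nx. */
static void reset_zero_bits_mask(const struct mmu_context *ctx)
{
	printf("NX treated as usable: %s\n",
	       ctx->mmu_role.base.efer_nx ? "yes" : "no");
}

int main(void)
{
	struct mmu_context ctx;

	tdp_enabled = false;	/* shadow paging in use */
	ctx.mmu_role = calc_shadow_root_role(false);	/* guest EFER.NX=0 */
	reset_zero_bits_mask(&ctx);	/* prints "yes": NX forced on */
	return 0;
}

One upside of folding the decision into the role rather than computing it at each consumer: all consumers agree by construction, and the role stays the single point of comparison when deciding whether an existing MMU context can be reused.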