KVM: x86: clear SMM flags before loading state while leaving SMM
RSM emulation is currently broken on VMX when the interrupted guest has
CR4.VMXE=1.  Stop dancing around the issue of HF_SMM_MASK being set when
loading SMSTATE into architectural state, e.g. by toggling it for
problematic flows, and simply clear HF_SMM_MASK prior to loading
architectural state (from SMRAM save state area).
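
For context, the rejection happens in vmx_set_cr4(): the check added by the
Fixes commit refuses to set CR4.VMXE while the vCPU is in SMM, and before this
patch em_rsm() restored CR4 from the SMRAM save state area while HF_SMM_MASK
was still set. A simplified sketch of that check (condensed from commit
5bea512, not the verbatim kernel code):

	/*
	 * VMX cannot be enabled under SMM (default treatment of SMM), so
	 * while HF_SMM_MASK is set, is_smm() is true and RSM emulation's
	 * attempt to restore the guest's CR4.VMXE=1 is rejected.
	 */
	if (cr4 & X86_CR4_VMXE) {
		if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
			return 1;	/* CR4 write fails; RSM unhandleable */
	}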

Reported-by: Jon Doron <arilou@gmail.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Fixes: 5bea512 ("KVM: VMX: check nested state and CR4.VMXE against SMM")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson authored and Paolo Bonzini committed Apr 16, 2019
commit 9ec1949 (parent: c5833c7)
Showing 3 changed files with 10 additions and 16 deletions.
arch/x86/kvm/emulate.c: 6 additions, 6 deletions

@@ -2571,6 +2571,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ret != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
 
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+		ctxt->ops->set_nmi_mask(ctxt, false);
+
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
@@ -2624,12 +2630,6 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-		ctxt->ops->set_nmi_mask(ctxt, false);
-
-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
-
 	ctxt->ops->post_leave_smm(ctxt);
 
 	return X86EMUL_CONTINUE;
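
The net effect on em_rsm() ordering, condensed as an illustrative comment
(step names paraphrased from the surrounding code, not verbatim):

	/*
	 * Post-patch em_rsm() flow, condensed:
	 *
	 *   1. read the SMRAM state-save area
	 *   2. drop NMI blocking unless the SMI arrived inside an NMI
	 *      handler, and clear the SMM hflags  <- moved up by this patch
	 *   3. force a safe real-mode-like CR0/CR4/EFER baseline
	 *   4. ->pre_leave_smm()  (vendor hook, may re-enter guest mode)
	 *   5. rsm_load_state_64()/rsm_load_state_32()  (restore
	 *      CR0/CR3/CR4/EFER and the rest of the saved state)
	 *   6. ->post_leave_smm()
	 *
	 * Steps 4 and 5 now run with HF_SMM_MASK already clear, so
	 * restoring CR4.VMXE=1 in step 5 no longer trips the is_smm()
	 * check in vmx_set_cr4().
	 */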
arch/x86/kvm/svm.c: 4 additions, 8 deletions

@@ -6239,21 +6239,17 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct page *page;
 	u64 guest;
 	u64 vmcb;
-	int ret;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
 	if (guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		nested_vmcb = nested_svm_map(svm, vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
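
On the SVM side the payoff is pure simplification; the resulting nested-guest
path, condensed from the diff above with comments added here:

	/*
	 * HF_SMM_MASK is already clear when this vendor hook runs, so the
	 * save/restore dance around nested_svm_map() is gone, along with
	 * the error-prone 'ret' bookkeeping.
	 */
	if (guest) {
		nested_vmcb = nested_svm_map(svm, vmcb, &page);
		if (!nested_vmcb)
			return 1;
		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
	}
	return 0;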
arch/x86/kvm/vmx/vmx.c: 0 additions, 2 deletions

@@ -7409,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	}
 
 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
 
