diff --git a/[refs] b/[refs]
index 460a24b41a2f..7030b4d6f61e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 92fe13be74303a7b80dc3c99e22e12a87d41bd5f
+refs/heads/master: b923e62e4d48bc5242b32a6ef5ba0f886137668a
diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c
index 0d281dbc008f..9529bff04262 100644
--- a/trunk/arch/x86/kvm/vmx.c
+++ b/trunk/arch/x86/kvm/vmx.c
@@ -63,6 +63,9 @@ module_param_named(unrestricted_guest,
 static int __read_mostly emulate_invalid_guest_state = 0;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
+static int __read_mostly vmm_exclusive = 1;
+module_param(vmm_exclusive, bool, S_IRUGO);
+
 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
 	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
 #define KVM_GUEST_CR0_MASK						\
@@ -845,7 +848,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 tsc_this, delta, new_offset;
 
-	if (vcpu->cpu != cpu)
+	if (vmm_exclusive && vcpu->cpu != cpu)
 		vcpu_clear(vmx);
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
@@ -891,6 +894,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	__vmx_load_host_state(to_vmx(vcpu));
+	if (!vmm_exclusive)
+		__vcpu_clear(to_vmx(vcpu));
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)