From 87bcc49a8a8b559014ec8cc29ef15aff23154cc1 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Wed, 24 Feb 2010 18:59:18 +0100
Subject: [PATCH]

--- yaml ---
r: 197636
b: refs/heads/master
c: 7f5d8b5600b5294137886b46bf00ef811d0fdf32
h: refs/heads/master
v: v3
---
 [refs]                   |  2 +-
 trunk/arch/x86/kvm/svm.c | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index e0fd15134478..8874cf0f51df 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b44ea385d8cb187e04ec8d901d4c320c8b07c40b
+refs/heads/master: 7f5d8b5600b5294137886b46bf00ef811d0fdf32
diff --git a/trunk/arch/x86/kvm/svm.c b/trunk/arch/x86/kvm/svm.c
index b4aac5c7ad87..631d2e544491 100644
--- a/trunk/arch/x86/kvm/svm.c
+++ b/trunk/arch/x86/kvm/svm.c
@@ -1043,6 +1043,27 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (is_nested(svm)) {
+		/*
+		 * We are here because we run in nested mode, the host kvm
+		 * intercepts cr0 writes but the l1 hypervisor does not.
+		 * But the L1 hypervisor may intercept selective cr0 writes.
+		 * This needs to be checked here.
+		 */
+		unsigned long old, new;
+
+		/* Remove bits that would trigger a real cr0 write intercept */
+		old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
+		new = cr0 & SVM_CR0_SELECTIVE_MASK;
+
+		if (old == new) {
+			/* cr0 write with ts and mp unchanged */
+			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+				return;
+		}
+	}
+
 #ifdef CONFIG_X86_64
 	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
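
For readers unfamiliar with SVM's selective CR0 intercept: the hunk above synthesizes an SVM_EXIT_CR0_SEL_WRITE exit for the L1 hypervisor when a nested guest writes CR0 without changing TS or MP. The standalone C sketch below restates that check in isolation; it is illustrative only. reflect_cr0_sel_write() and l1_intercepts_selective_cr0 are hypothetical names (the real patch delegates the intercept check to nested_svm_exit_handled()), and SVM_CR0_SELECTIVE_MASK is assumed to be (X86_CR0_TS | X86_CR0_MP), which the patch's "ts and mp unchanged" comment implies but does not show.

#include <stdbool.h>
#include <stdio.h>

#define X86_CR0_MP (1UL << 1)	/* CR0.MP, Monitor coProcessor */
#define X86_CR0_TS (1UL << 3)	/* CR0.TS, Task Switched */
#define X86_CR0_WP (1UL << 16)	/* CR0.WP, Write Protect */

/*
 * Assumed definition; the patch's "ts and mp unchanged" comment implies
 * the mask covers exactly these two bits.
 */
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)

/*
 * Mirrors the check added in the hunk above: a nested guest's CR0 write
 * is reflected to L1 as a CR0_SEL_WRITE exit only if the write leaves TS
 * and MP unchanged and L1 enabled the selective CR0 intercept. The bool
 * parameter is a hypothetical stand-in for the intercept test that
 * nested_svm_exit_handled() performs in the real code.
 */
static bool reflect_cr0_sel_write(unsigned long old_cr0,
				  unsigned long new_cr0,
				  bool l1_intercepts_selective_cr0)
{
	unsigned long old = old_cr0 & SVM_CR0_SELECTIVE_MASK;
	unsigned long new = new_cr0 & SVM_CR0_SELECTIVE_MASK;

	return old == new && l1_intercepts_selective_cr0;
}

int main(void)
{
	unsigned long cr0 = 0x80000011UL;	/* PG | ET | PE */

	/* WP flips, TS/MP untouched: exit is reflected to L1 (prints 1). */
	printf("%d\n", reflect_cr0_sel_write(cr0, cr0 | X86_CR0_WP, true));

	/* MP flips: the patch leaves this to the normal CR0 path (prints 0). */
	printf("%d\n", reflect_cr0_sel_write(cr0, cr0 | X86_CR0_MP, true));

	return 0;
}

Compiled and run, the sketch prints 1 then 0, matching the two branches of the patch's old == new test.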