KVM: nVMX: introduce nested_vmx_load_cr3 and call it on vmentry
Loading CR3 as part of emulating vmentry is different from regular CR3 loads,
as implemented in kvm_set_cr3, in several ways.

* different rules are followed to check CR3 and it is desirable for the caller
to distinguish between the possible failures
* PDPTRs are not loaded if PAE paging and nested EPT are both enabled
* many MMU operations are not necessary

This patch introduces nested_vmx_load_cr3 suitable for CR3 loads as part of
nested vmentry and vmexit, and makes use of it on the nested vmentry path.
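
As an aside on the first point, the CR3 check in the new helper boils down to rejecting any set bits at or above the CPU's reported MAXPHYADDR. A small standalone illustration of that mask arithmetic (not kernel code; the MAXPHYADDR value of 36 is just an example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the patch's check: CR3 must not set bits >= MAXPHYADDR. */
static bool cr3_within_maxphyaddr(uint64_t cr3, unsigned int maxphyaddr)
{
	uint64_t invalid_mask = ~0ULL << maxphyaddr;	/* e.g. bits 63:36 */

	return (cr3 & invalid_mask) == 0;
}

int main(void)
{
	/* With MAXPHYADDR = 36, physical addresses must be below 1ULL << 36. */
	printf("%d\n", cr3_within_maxphyaddr(0x0000000fedcba000ULL, 36)); /* 1: valid */
	printf("%d\n", cr3_within_maxphyaddr(0x0000001000000000ULL, 36)); /* 0: bit 36 set, ENTRY_FAIL_DEFAULT in the patch */
	return 0;
}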

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Ladi Prosek authored and Paolo Bonzini committed Dec 8, 2016
1 parent ee146c1 commit 9ed38ff
Showing 3 changed files with 45 additions and 16 deletions.
1 change: 1 addition & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -1071,6 +1071,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+bool pdptrs_changed(struct kvm_vcpu *vcpu);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			const void *val, int bytes);
57 changes: 42 additions & 15 deletions arch/x86/kvm/vmx.c
@@ -9968,6 +9968,44 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	return 0;
 }
 
+/*
+ * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
+ * emulating VM entry into a guest with EPT enabled.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+			       unsigned long *entry_failure_code)
+{
+	unsigned long invalid_mask;
+
+	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
+		invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+		if (cr3 & invalid_mask) {
+			*entry_failure_code = ENTRY_FAIL_DEFAULT;
+			return 1;
+		}
+
+		/*
+		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
+		 * must not be dereferenced.
+		 */
+		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
+		    !nested_ept) {
+			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
+				*entry_failure_code = ENTRY_FAIL_PDPTE;
+				return 1;
+			}
+		}
+
+		vcpu->arch.cr3 = cr3;
+		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+	}
+
+	kvm_mmu_reset_context(vcpu);
+	return 0;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -10300,21 +10338,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
 	vmx_set_efer(vcpu, vcpu->arch.efer);
 
-	/*
-	 * Shadow page tables on either EPT or shadow page tables.
-	 * If PAE and EPT are both on, CR3 is not used by the CPU and must not
-	 * be dereferenced.
-	 */
-	if (is_pae(vcpu) && is_paging(vcpu) && !is_long_mode(vcpu) &&
-	    nested_ept_enabled) {
-		vcpu->arch.cr3 = vmcs12->guest_cr3;
-		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-	} else {
-		if (kvm_set_cr3(vcpu, vmcs12->guest_cr3)) {
-			*entry_failure_code = ENTRY_FAIL_DEFAULT;
-			return 1;
-		}
-	}
+	/* Shadow page tables on either EPT or shadow page tables. */
+	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+				entry_failure_code))
+		return 1;
 
 	kvm_mmu_reset_context(vcpu);
 
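The commit message notes that nested_vmx_load_cr3 is meant to serve the nested vmexit path as well, although this commit only wires up vmentry. A rough sketch of what the vmexit-side call might look like (hypothetical placement in the vmcs12 host-state loading code; the error handling is left open and is not part of this patch):

	/* Hypothetical follow-up sketch -- not part of this commit. */
	unsigned long failure_code;

	/*
	 * On emulated VM-exit the CR3 to load is L1's, taken from the
	 * host-state area of vmcs12.  nested_ept is false here because L1
	 * runs on its own page tables after the exit, so PDPTRs must be
	 * reloaded when L1 uses PAE paging.
	 */
	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &failure_code))
		return;	/* how to report this to L1 is a separate question */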
3 changes: 2 additions & 1 deletion arch/x86/kvm/x86.c
@@ -566,7 +566,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 }
 EXPORT_SYMBOL_GPL(load_pdptrs);
 
-static bool pdptrs_changed(struct kvm_vcpu *vcpu)
+bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
 	bool changed = true;
@@ -592,6 +592,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 
 	return changed;
 }
+EXPORT_SYMBOL_GPL(pdptrs_changed);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
