KVM: Cache pdptrs
Instead of reloading the pdptrs on every entry and exit (vmcs writes on vmx,
guest memory access on svm) extract them on demand.

Signed-off-by: Avi Kivity <avi@redhat.com>
Avi Kivity committed Sep 10, 2009
1 parent 8f5d549 commit 6de4f3a
Showing 7 changed files with 63 additions and 13 deletions.
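
Note on the approach: instead of unconditionally refreshing the pdptrs at every vmexit, the patch tracks them in the existing regs_avail/regs_dirty bitmaps and refills the cache only when a reader actually asks for a pdptr. The stand-alone sketch below models that read path; the toy_vcpu structure, the bit constant, and backend_cache_pdptrs() are illustrative stand-ins, not the kernel's definitions (the real read side is kvm_pdptr_read() and the per-vendor cache_reg hooks in the diff below).

    /* Toy model of the on-demand pdptr cache; illustrative only, not kernel code. */
    #include <stdint.h>
    #include <stdio.h>

    #define EXREG_PDPTR_BIT (1u << 0)

    struct toy_vcpu {
            uint64_t pdptrs[4];   /* cached copies of the guest's PDPTEs */
            unsigned regs_avail;  /* bit set => cached pdptrs are current */
            unsigned regs_dirty;  /* bit set => pdptrs must be written back before entry */
    };

    /* Stand-in for the vendor hook (vmx_cache_reg/svm_cache_reg): refill the cache. */
    static void backend_cache_pdptrs(struct toy_vcpu *vcpu)
    {
            for (int i = 0; i < 4; i++)
                    vcpu->pdptrs[i] = 0x1000u * (i + 1) | 1; /* pretend VMCS/guest-memory read */
            vcpu->regs_avail |= EXREG_PDPTR_BIT;
            printf("cache refilled\n");
    }

    /* Mirrors the shape of kvm_pdptr_read(): hit the backend only when stale. */
    static uint64_t toy_pdptr_read(struct toy_vcpu *vcpu, int index)
    {
            if (!(vcpu->regs_avail & EXREG_PDPTR_BIT))
                    backend_cache_pdptrs(vcpu);
            return vcpu->pdptrs[index];
    }

    int main(void)
    {
            struct toy_vcpu vcpu = { .regs_avail = 0, .regs_dirty = 0 };

            toy_pdptr_read(&vcpu, 0);             /* first read after an "exit": refill */
            toy_pdptr_read(&vcpu, 1);             /* pure cache hit */
            vcpu.regs_avail &= ~EXREG_PDPTR_BIT;  /* what vcpu_run does after the next exit */
            toy_pdptr_read(&vcpu, 2);             /* refilled again, on demand */
            return 0;
    }

With this shape, the exit paths only clear an availability bit, which is cheap, instead of re-reading four PDPTEs on every exit.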
4 changes: 4 additions & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -120,6 +120,10 @@ enum kvm_reg {
 	NR_VCPU_REGS
 };
 
+enum kvm_reg_ex {
+	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+};
+
 enum {
 	VCPU_SREG_ES,
 	VCPU_SREG_CS,
9 changes: 9 additions & 0 deletions arch/x86/kvm/kvm_cache_regs.h
@@ -29,4 +29,13 @@ static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
 	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
 }
 
+static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
+{
+	if (!test_bit(VCPU_EXREG_PDPTR,
+		      (unsigned long *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
+
+	return vcpu->arch.pdptrs[index];
+}
+
 #endif
7 changes: 5 additions & 2 deletions arch/x86/kvm/mmu.c
@@ -18,6 +18,7 @@
  */
 
 #include "mmu.h"
+#include "kvm_cache_regs.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -1954,6 +1955,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
 	int direct = 0;
+	u64 pdptr;
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1981,11 +1983,12 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
-			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+			pdptr = kvm_pdptr_read(vcpu, i);
+			if (!is_present_pte(pdptr)) {
 				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
 			}
-			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+			root_gfn = pdptr >> PAGE_SHIFT;
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		if (mmu_check_root(vcpu, root_gfn))
2 changes: 1 addition & 1 deletion arch/x86/kvm/paging_tmpl.h
@@ -131,7 +131,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 	pte = vcpu->arch.cr3;
 #if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
-		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
+		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
 		if (!is_present_pte(pte))
 			goto not_present;
 		--walker->level;
24 changes: 18 additions & 6 deletions arch/x86/kvm/svm.c
@@ -777,6 +777,18 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
+static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+	switch (reg) {
+	case VCPU_EXREG_PDPTR:
+		BUG_ON(!npt_enabled);
+		load_pdptrs(vcpu, vcpu->arch.cr3);
+		break;
+	default:
+		BUG();
+	}
+}
+
 static void svm_set_vintr(struct vcpu_svm *svm)
 {
 	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
@@ -2285,12 +2297,6 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	}
 	vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	vcpu->arch.cr3 = svm->vmcb->save.cr3;
-	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
-			kvm_inject_gp(vcpu, 0);
-			return 1;
-		}
-	}
 	if (mmu_reload) {
 		kvm_mmu_reset_context(vcpu);
 		kvm_mmu_load(vcpu);
@@ -2641,6 +2647,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	svm->next_rip = 0;
 
+	if (npt_enabled) {
+		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
+		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
+	}
+
 	svm_complete_interrupts(svm);
 }
 
@@ -2749,6 +2760,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_gdt = svm_set_gdt,
 	.get_dr = svm_get_dr,
 	.set_dr = svm_set_dr,
+	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
 
22 changes: 18 additions & 4 deletions arch/x86/kvm/vmx.c
@@ -161,6 +161,8 @@ static struct kvm_vmx_segment_field {
 	VMX_SEGMENT_FIELD(LDTR),
 };
 
+static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
+
 /*
  * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
  * away by decrementing the array size.
@@ -1047,6 +1049,10 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	case VCPU_REGS_RIP:
 		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
 		break;
+	case VCPU_EXREG_PDPTR:
+		if (enable_ept)
+			ept_save_pdptrs(vcpu);
+		break;
 	default:
 		break;
 	}
@@ -1546,6 +1552,10 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+	if (!test_bit(VCPU_EXREG_PDPTR,
+		      (unsigned long *)&vcpu->arch.regs_dirty))
+		return;
+
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
 		vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
 		vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
@@ -1562,6 +1572,11 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 		vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
 		vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
 	}
+
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_avail);
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
@@ -3255,10 +3270,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	/* Access CR3 don't cause VMExit in paging mode, so we need
 	 * to sync with guest real CR3. */
-	if (enable_ept && is_paging(vcpu)) {
+	if (enable_ept && is_paging(vcpu))
 		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-		ept_save_pdptrs(vcpu);
-	}
 
 	if (unlikely(vmx->fail)) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -3567,7 +3580,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 	      );
 
-	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
+	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
+				  | (1 << VCPU_EXREG_PDPTR));
 	vcpu->arch.regs_dirty = 0;
 
 	get_debugreg(vcpu->arch.dr6, 6);
8 changes: 8 additions & 0 deletions arch/x86/kvm/x86.c
@@ -246,6 +246,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	ret = 1;
 
 	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_avail);
+	__set_bit(VCPU_EXREG_PDPTR,
+		  (unsigned long *)&vcpu->arch.regs_dirty);
 out:
 
 	return ret;
@@ -261,6 +265,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
+	if (!test_bit(VCPU_EXREG_PDPTR,
+		      (unsigned long *)&vcpu->arch.regs_avail))
+		return true;
+
 	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
 	if (r < 0)
 		goto out;
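
Taken together, the hunks keep two bits of bookkeeping for the pdptrs: regs_avail says the cached copy in vcpu->arch.pdptrs is current (so kvm_pdptr_read() and pdptrs_changed() may trust it), while regs_dirty says the cached copy still has to be propagated to the hardware state, which is why ept_load_pdptrs() now returns early when the dirty bit is clear. Below is a minimal sketch of that entry-side gate; the toy names stand in for the VMCS accessors and are not the kernel's definitions (the real write-back is the vmcs_write64(GUEST_PDPTRn, ...) calls above).

    /* Illustrative model of the dirty-gated write-back; not the kernel's code. */
    #include <stdint.h>
    #include <stdio.h>

    #define EXREG_PDPTR_BIT (1u << 0)

    struct toy_vcpu {
            uint64_t pdptrs[4];
            unsigned regs_dirty;
    };

    /* Stand-in for vmcs_write64(GUEST_PDPTRn, ...). */
    static void toy_vmcs_write_pdptr(int n, uint64_t val)
    {
            printf("GUEST_PDPTR%d <- %#llx\n", n, (unsigned long long)val);
    }

    /* Mirrors the shape of ept_load_pdptrs(): skip the VMCS writes when nothing changed. */
    static void toy_load_pdptrs(struct toy_vcpu *vcpu)
    {
            if (!(vcpu->regs_dirty & EXREG_PDPTR_BIT))
                    return; /* cached pdptrs already match the hardware state */
            for (int i = 0; i < 4; i++)
                    toy_vmcs_write_pdptr(i, vcpu->pdptrs[i]);
    }

    int main(void)
    {
            struct toy_vcpu vcpu = {
                    .pdptrs = { 0x1001, 0x2001, 0x3001, 0x4001 },
                    .regs_dirty = EXREG_PDPTR_BIT,   /* e.g. after load_pdptrs() */
            };

            toy_load_pdptrs(&vcpu);   /* dirty: writes all four entries */
            vcpu.regs_dirty = 0;      /* vcpu_run clears regs_dirty after entry */
            toy_load_pdptrs(&vcpu);   /* clean: no VMCS traffic at all */
            return 0;
    }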
