Commit fdd734e

---
r: 202300
b: refs/heads/master
c: 2032a93
h: refs/heads/master
v: v3
Lai Jiangshan authored and Avi Kivity committed Aug 1, 2010
1 parent bb1e399 commit fdd734e
Showing 4 changed files with 36 additions and 11 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c8174f7b35b3018c4c7b3237ed1c792e454fd5c3
+refs/heads/master: 2032a93d66fa282ba0f2ea9152eeff9511fa9a96
4 changes: 3 additions & 1 deletion trunk/Documentation/kvm/mmu.txt
@@ -180,7 +180,9 @@ Shadow pages contain the following information:
     guest pages as leaves.
   gfns:
     An array of 512 guest frame numbers, one for each present pte. Used to
-    perform a reverse map from a pte to a gfn.
+    perform a reverse map from a pte to a gfn. When role.direct is set, any
+    element of this array can be calculated from the gfn field when used; in
+    this case, the array of gfns is not allocated. See role.direct and gfn.
   slot_bitmap:
     A bitmap containing one bit per memory slot. If the page contains a pte
     mapping a page from memory slot n, then bit n of slot_bitmap will be set
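The relationship the documentation now describes is the one encoded by the kvm_mmu_page_get_gfn() helper added to mmu.c below: a direct shadow page maps a contiguous, aligned run of guest frames, so the gfn for entry i is the page's base gfn plus i shifted left by (role.level - 1) * PT64_LEVEL_BITS. A minimal standalone sketch of that arithmetic (illustrative only, not part of this diff; it assumes PT64_LEVEL_BITS == 9, i.e. 4K pages with 512 entries per table):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: the gfn implied by entry 'index' of a direct shadow page
 * whose base frame is 'base_gfn', at paging level 'level' (1 = lowest).
 * Each entry at level L covers 512^(L-1) guest frames.
 */
static uint64_t direct_gfn(uint64_t base_gfn, int index, int level)
{
	return base_gfn + ((uint64_t)index << ((level - 1) * 9));
}

int main(void)
{
	/* Level-1 page: entry i maps base_gfn + i. */
	printf("%#llx\n", (unsigned long long)direct_gfn(0x1000, 3, 1)); /* 0x1003 */
	/* Level-2 page: entry i maps base_gfn + i * 512. */
	printf("%#llx\n", (unsigned long long)direct_gfn(0x1000, 3, 2)); /* 0x1600 */
	return 0;
}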
38 changes: 29 additions & 9 deletions trunk/arch/x86/kvm/mmu.c
@@ -397,6 +397,22 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 	kmem_cache_free(rmap_desc_cache, rd);
 }
 
+static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
+{
+	if (!sp->role.direct)
+		return sp->gfns[index];
+
+	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
+}
+
+static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
+{
+	if (sp->role.direct)
+		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
+	else
+		sp->gfns[index] = gfn;
+}
+
 /*
  * Return the pointer to the largepage write count for a given
  * gfn, handling slots that are not large page aligned.
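For illustration, a reduced standalone mock of the get/set pair above (an assumption-laden sketch, not kernel code): a non-direct page stores the gfn in sp->gfns[], while a direct page stores nothing and the setter only checks that the caller's value matches what the getter would recompute, which is what the BUG_ON() enforces.

#include <assert.h>
#include <stdint.h>

#define LEVEL_BITS 9			/* stands in for PT64_LEVEL_BITS */

struct mock_sp {			/* reduced stand-in for struct kvm_mmu_page */
	int direct;			/* sp->role.direct */
	int level;			/* sp->role.level */
	uint64_t gfn;			/* base gfn mapped by this page */
	uint64_t *gfns;			/* NULL when direct */
};

static uint64_t mock_get_gfn(struct mock_sp *sp, int index)
{
	if (!sp->direct)
		return sp->gfns[index];
	return sp->gfn + ((uint64_t)index << ((sp->level - 1) * LEVEL_BITS));
}

static void mock_set_gfn(struct mock_sp *sp, int index, uint64_t gfn)
{
	if (sp->direct)
		assert(gfn == mock_get_gfn(sp, index));	/* BUG_ON() analogue */
	else
		sp->gfns[index] = gfn;
}

int main(void)
{
	struct mock_sp direct_page = { .direct = 1, .level = 1, .gfn = 0x1000, .gfns = 0 };

	mock_set_gfn(&direct_page, 3, 0x1003);	/* consistent value: check passes */
	return 0;
}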
@@ -547,7 +563,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 		return count;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
-	sp->gfns[spte - sp->spt] = gfn;
+	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 	if (!*rmapp) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
@@ -605,6 +621,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_desc *prev_desc;
 	struct kvm_mmu_page *sp;
 	pfn_t pfn;
+	gfn_t gfn;
 	unsigned long *rmapp;
 	int i;
 
@@ -616,7 +633,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writable_pte(*spte))
 		kvm_set_pfn_dirty(pfn);
-	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
+	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
@@ -900,7 +918,8 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	ASSERT(is_empty_shadow_page(sp->spt));
 	list_del(&sp->link);
 	__free_page(virt_to_page(sp->spt));
-	__free_page(virt_to_page(sp->gfns));
+	if (!sp->role.direct)
+		__free_page(virt_to_page(sp->gfns));
 	kmem_cache_free(mmu_page_header_cache, sp);
 	++kvm->arch.n_free_mmu_pages;
 }
@@ -911,13 +930,15 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 }
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
-					       u64 *parent_pte)
+					       u64 *parent_pte, int direct)
 {
 	struct kvm_mmu_page *sp;
 
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
-	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+	if (!direct)
+		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
+						  PAGE_SIZE);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
@@ -1386,7 +1407,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		return sp;
 	}
 	++vcpu->kvm->stat.mmu_cache_miss;
-	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
+	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
 	if (!sp)
 		return sp;
 	sp->gfn = gfn;
@@ -3403,7 +3424,7 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 
 	if (*sptep & PT_WRITABLE_MASK) {
 		rev_sp = page_header(__pa(sptep));
-		gfn = rev_sp->gfns[sptep - rev_sp->spt];
+		gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
 		if (!gfn_to_memslot(kvm, gfn)) {
 			if (!printk_ratelimit())
@@ -3417,8 +3438,7 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 			return;
 	}
 
-	rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
-			    rev_sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
 	if (!*rmapp) {
 		if (!printk_ratelimit())
 			return;
3 changes: 3 additions & 0 deletions trunk/arch/x86/kvm/paging_tmpl.h
@@ -582,6 +582,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 	offset = nr_present = 0;
 
+	/* direct kvm_mmu_page can not be unsync. */
+	BUG_ON(sp->role.direct);
+
 	if (PTTYPE == 32)
 		offset = sp->role.quadrant << PT64_LEVEL_BITS;
 
