Skip to content

Commit

Permalink
KVM: MMU: Split gfn_to_rmap() into two functions
Browse files Browse the repository at this point in the history
rmap_write_protect() calls gfn_to_rmap() for each level with gfn fixed.
This results in calling gfn_to_memslot() repeatedly with that gfn.

This patch introduces __gfn_to_rmap() which takes the slot as an
argument to avoid this.

This is also needed for the following dirty logging optimization.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
  • Loading branch information
Takuya Yoshikawa authored and Avi Kivity committed Dec 27, 2011
1 parent d6eebf8 commit 9b9b149
Showing 1 changed file with 17 additions and 9 deletions.
26 changes: 17 additions & 9 deletions arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -958,23 +958,29 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
}
}

/*
 * Take gfn and return the reverse mapping to it in the given memslot.
 *
 * The caller must already have translated gfn to its memslot; passing the
 * slot in avoids repeated gfn_to_memslot() lookups when callers walk the
 * rmap for several page-table levels with the same gfn.
 */
static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level,
				    struct kvm_memory_slot *slot)
{
	struct kvm_lpage_info *linfo;

	/* 4K pages: the rmap array is indexed by the gfn's offset in the slot. */
	if (likely(level == PT_PAGE_TABLE_LEVEL))
		return &slot->rmap[gfn - slot->base_gfn];

	/* Large pages: rmap_pde lives in the per-level lpage_info array. */
	linfo = lpage_info_slot(gfn, slot, level);

	return &linfo->rmap_pde;
}

/*
 * Take gfn and return the reverse mapping to it.
 *
 * Convenience wrapper: resolves the memslot for gfn itself, then defers
 * to __gfn_to_rmap().  Callers that already hold the slot should use
 * __gfn_to_rmap() directly to skip the lookup.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	return __gfn_to_rmap(kvm, gfn, level, memslot);
}

static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_memory_cache *cache;
Expand Down Expand Up @@ -1019,12 +1025,14 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)

static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
struct kvm_memory_slot *slot;
unsigned long *rmapp;
u64 *spte;
int i, write_protected = 0;

rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
slot = gfn_to_memslot(kvm, gfn);

rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
spte = rmap_next(kvm, rmapp, NULL);
while (spte) {
BUG_ON(!(*spte & PT_PRESENT_MASK));
Expand All @@ -1039,7 +1047,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
/* check for huge page mappings */
for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
rmapp = gfn_to_rmap(kvm, gfn, i);
rmapp = __gfn_to_rmap(kvm, gfn, i, slot);
spte = rmap_next(kvm, rmapp, NULL);
while (spte) {
BUG_ON(!(*spte & PT_PRESENT_MASK));
Expand Down

0 comments on commit 9b9b149

Please sign in to comment.