KVM: x86: pass struct kvm_mmu_page to gfn_to_rmap
The struct kvm_mmu_page is always available (with one exception in the
auditing code), and with the same auditing exception the level was
already coming from sp->role.level.

Later, the spte's role will also be used to look up the right memslots
array.

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini committed Jun 5, 2015
1 parent f481b06 · commit e4cd1da
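
As a rough illustration of the direction hinted at in the last paragraph of the commit message, gfn_to_rmap() could later use the shadow page's role to pick the right memslots array before resolving the slot. This is a sketch only; the helper name kvm_memslots_for_spte_role() is assumed here for illustration and is not part of this commit.

/*
 * Sketch only, not introduced by this commit: use sp->role to select
 * the memslots array, as the commit message anticipates.
 * kvm_memslots_for_spte_role() is a hypothetical helper name here.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
				  struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	/* Pick the memslots array matching this shadow page's role. */
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	return __gfn_to_rmap(gfn, sp->role.level, slot);
}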
Showing 2 changed files with 11 additions and 7 deletions.
arch/x86/kvm/mmu.c (10 changes: 5 additions & 5 deletions)
@@ -1043,12 +1043,12 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 /*
  * Take gfn and return the reverse mapping to it.
  */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	return __gfn_to_rmap(gfn, level, slot);
+	return __gfn_to_rmap(gfn, sp->role.level, slot);
 }
 
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
@@ -1066,7 +1066,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 	return pte_list_add(vcpu, spte, rmapp);
 }
 
@@ -1078,7 +1078,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 
 	sp = page_header(__pa(spte));
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, sp);
 	pte_list_remove(spte, rmapp);
 }
 
@@ -1612,7 +1612,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
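
For reference, the __gfn_to_rmap() helper that both the old and new gfn_to_rmap() call into is unchanged by this patch; at the time it looked roughly like the sketch below (reproduced from memory, details may differ). It turns the gfn and mapping level into an index into the memslot's per-level rmap array, which is why a slot and a level are all it needs.

static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
				    struct kvm_memory_slot *slot)
{
	unsigned long idx;

	/* Index of the gfn within the slot, scaled for the mapping level. */
	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}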
arch/x86/kvm/mmu_audit.c (8 changes: 6 additions & 2 deletions)
@@ -146,7 +146,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 		return;
 	}
 
-	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, rev_sp);
 	if (!*rmapp) {
 		if (!__ratelimit(&ratelimit_state))
 			return;
@@ -191,11 +191,15 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	unsigned long *rmapp;
 	u64 *sptep;
 	struct rmap_iterator iter;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
 
 	if (sp->role.direct || sp->unsync || sp->role.invalid)
 		return;
 
-	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);
+	slots = kvm_memslots(kvm);
+	slot = __gfn_to_memslot(slots, sp->gfn);
+	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
 
 	for_each_rmap_spte(rmapp, &iter, sptep)
 		if (is_writable_pte(*sptep))
