KVM: x86/mmu: Leverage vcpu->last_used_slot for rmap_add and rmap_recycle

rmap_add() and rmap_recycle() both run in the context of the vCPU and
thus we can use kvm_vcpu_gfn_to_memslot() to look up the memslot. This
enables rmap_add() and rmap_recycle() to take advantage of
vcpu->last_used_slot and avoid expensive memslot searching.
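
For context, here is a minimal, self-contained sketch of the last-used-slot
caching pattern that kvm_vcpu_gfn_to_memslot() exploits. This is a
hypothetical illustration, not the real KVM implementation; the struct names
and the linear slow-path scan are simplified stand-ins for KVM's actual
memslot structures and search.

/* Hypothetical sketch of a last-used-slot fast path; not actual KVM code. */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;
	uint64_t npages;
};

struct vm {
	struct memslot *slots;	/* stand-in for the memslot array */
	size_t nr_slots;
};

struct vcpu {
	struct vm *vm;
	struct memslot *last_used_slot;	/* analogous to vcpu->last_used_slot */
};

static int gfn_in_slot(const struct memslot *slot, gfn_t gfn)
{
	return slot && gfn >= slot->base_gfn &&
	       gfn < slot->base_gfn + slot->npages;
}

static struct memslot *vcpu_gfn_to_memslot(struct vcpu *vcpu, gfn_t gfn)
{
	struct memslot *slot = vcpu->last_used_slot;
	size_t i;

	/* Fast path: accesses cluster, so the last-used slot usually hits. */
	if (gfn_in_slot(slot, gfn))
		return slot;

	/*
	 * Slow path: search every slot (a linear scan standing in for KVM's
	 * real search), then refresh the per-vCPU cache for the next lookup.
	 */
	for (i = 0; i < vcpu->vm->nr_slots; i++) {
		if (gfn_in_slot(&vcpu->vm->slots[i], gfn)) {
			vcpu->last_used_slot = &vcpu->vm->slots[i];
			return vcpu->last_used_slot;
		}
	}
	return NULL;
}

When the fast path hits, the lookup cost is independent of the number of
memslots, which is why "Populate memory time" stops scaling with -x in the
results below.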

This change improves the performance of "Populate memory time" in
dirty_log_perf_test with tdp_mmu=N. In addition to improving the
performance, "Populate memory time" no longer scales with the number
of memslots in the VM.

Command                         | Before           | After
------------------------------- | ---------------- | -------------
./dirty_log_perf_test -v64 -x1  | 15.18001570s     | 14.99469366s
./dirty_log_perf_test -v64 -x64 | 18.71336392s     | 14.98675076s

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210804222844.1419481-6-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
David Matlack authored and Paolo Bonzini committed Aug 6, 2021
1 parent 081de47 commit 601f8af
Showing 1 changed file with 20 additions and 15 deletions.

arch/x86/kvm/mmu/mmu.c

@@ -1044,17 +1044,6 @@ static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
 }
 
-static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
-					 struct kvm_mmu_page *sp)
-{
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *slot;
-
-	slots = kvm_memslots_for_spte_role(kvm, sp->role);
-	slot = __gfn_to_memslot(slots, gfn);
-	return __gfn_to_rmap(gfn, sp->role.level, slot);
-}
-
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *mc;
@@ -1065,24 +1054,39 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
 
 	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+
 	return pte_list_add(vcpu, spte, rmap_head);
 }
 
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
 	struct kvm_mmu_page *sp;
 	gfn_t gfn;
 	struct kvm_rmap_head *rmap_head;
 
 	sp = sptep_to_sp(spte);
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmap_head = gfn_to_rmap(kvm, gfn, sp);
+
+	/*
+	 * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
+	 * context of a vCPU so have to determine which memslots to use based
+	 * on context information in sp->role.
+	 */
+	slots = kvm_memslots_for_spte_role(kvm, sp->role);
+
+	slot = __gfn_to_memslot(slots, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+
 	__pte_list_remove(spte, rmap_head);
 }
 
@@ -1620,12 +1624,13 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
 
 	sp = sptep_to_sp(spte);
-
-	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
