kvm: search_memslots: add simple LRU memslot caching
In a typical guest boot workload only 2-3 memslots are used
extensively, and most lookups repeatedly hit the same memslot.

Adding an LRU cache improves the average lookup time from
46 to 28 cycles (~40%) for this workload.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Igor Mammedov authored and Paolo Bonzini committed Dec 4, 2014
parent: 7f379cf · commit: d4ae84a
 include/linux/kvm_host.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -353,6 +353,7 @@ struct kvm_memslots {
 	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
+	atomic_t lru_slot;
 };
 
 struct kvm {
@@ -790,12 +791,19 @@ static inline void kvm_guest_exit(void)
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
+	int slot = atomic_read(&slots->lru_slot);
+	struct kvm_memory_slot *memslot = &slots->memslots[slot];
+
+	if (gfn >= memslot->base_gfn &&
+	    gfn < memslot->base_gfn + memslot->npages)
+		return memslot;
 
 	kvm_for_each_memslot(memslot, slots)
 		if (gfn >= memslot->base_gfn &&
-		    gfn < memslot->base_gfn + memslot->npages)
+		    gfn < memslot->base_gfn + memslot->npages) {
+			atomic_set(&slots->lru_slot, memslot - slots->memslots);
 			return memslot;
+		}
 
 	return NULL;
 }
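For illustration, here is a minimal stand-alone userspace sketch of the pattern the patch introduces: check the slot that hit last time before falling back to the linear scan. The names (lru_sketch.c, struct slot, NSLOTS, search()) are hypothetical stand-ins, not kernel API, and a plain int replaces the patch's atomic_t, which the kernel needs because concurrent lookups may read and update the cached index.

	/* lru_sketch.c: illustrative stand-alone model of the cached lookup.
	 * Compile with: cc -o lru_sketch lru_sketch.c
	 */
	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t gfn_t;		/* guest frame number, as in KVM */

	#define NSLOTS 8		/* illustrative; KVM uses KVM_MEM_SLOTS_NUM */

	struct slot {
		gfn_t base_gfn;
		uint64_t npages;
	};

	struct slots {
		struct slot mem[NSLOTS];
		int used;
		int lru_slot;	/* index of the most recently hit slot;
				 * the kernel keeps this in an atomic_t */
	};

	static struct slot *search(struct slots *s, gfn_t gfn)
	{
		struct slot *m = &s->mem[s->lru_slot];
		int i;

		/* Fast path: most lookups hit the same slot as last time. */
		if (gfn >= m->base_gfn && gfn < m->base_gfn + m->npages)
			return m;

		/* Slow path: linear scan; remember the hit for next time. */
		for (i = 0; i < s->used; i++) {
			m = &s->mem[i];
			if (gfn >= m->base_gfn && gfn < m->base_gfn + m->npages) {
				s->lru_slot = i;
				return m;
			}
		}
		return NULL;
	}

	int main(void)
	{
		struct slots s = {
			.mem  = { { 0, 256 }, { 1024, 512 } },
			.used = 2,
		};

		/* The first lookup scans and caches slot 1; the second,
		 * nearby lookup is served from the cached index. */
		printf("gfn 1100 -> slot %td\n", search(&s, 1100) - s.mem);
		printf("gfn 1200 -> slot %td\n", search(&s, 1200) - s.mem);
		return 0;
	}

Despite the name, this is not a full LRU list but a single cached most-recently-used index: a miss costs one extra range check, while the common repeated lookup skips the scan entirely, which is where the 46-to-28-cycle improvement quoted in the commit message comes from.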
