KVM: Allow dynamic allocation of the mmu shadow cache size
The user is now able to set how many mmu pages will be allocated to the guest.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Izik Eidus authored and Avi Kivity committed Jan 30, 2008
1 parent 195aefd commit 82ce2c9
Showing 4 changed files with 94 additions and 3 deletions.
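As context for the interface this commit adds: below is a minimal userspace sketch of driving the new ioctls. It is illustrative only, not part of the commit; it assumes a system fd opened from /dev/kvm and a VM fd obtained via KVM_CREATE_VM, and the helper name resize_mmu_cache is invented for the example.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Resize a VM's shadow-page budget; returns the new size or -1. */
static int resize_mmu_cache(int sys_fd, int vm_fd, unsigned long nr_pages)
{
	/* Kernels without this commit lack the capability. */
	if (ioctl(sys_fd, KVM_CHECK_EXTENSION,
		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL) <= 0)
		return -1;

	/* Values below KVM_MIN_ALLOC_MMU_PAGES (64) fail with -EINVAL. */
	if (ioctl(vm_fd, KVM_SET_NR_MMU_PAGES, nr_pages) < 0)
		return -1;

	/* Read back the resulting n_alloc_mmu_pages. */
	return ioctl(vm_fd, KVM_GET_NR_MMU_PAGES);
}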
7 changes: 6 additions & 1 deletion drivers/kvm/kvm.h
@@ -40,6 +40,8 @@
#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
@@ -418,7 +420,9 @@ struct kvm {
* Hash table of struct kvm_mmu_page.
*/
struct list_head active_mmu_pages;
int n_free_mmu_pages;
unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
unsigned int n_alloc_mmu_pages;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
unsigned long rmap_overflow;
@@ -547,6 +551,7 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
47 changes: 47 additions & 0 deletions drivers/kvm/kvm_main.c
@@ -743,6 +743,24 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1;

if (!kvm->n_requested_mmu_pages) {
unsigned int n_pages;

if (npages) {
n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
n_pages);
} else {
unsigned int nr_mmu_pages;

n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
nr_mmu_pages = max(nr_mmu_pages,
(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
}
}

*memslot = new;

kvm_mmu_slot_remove_write_access(kvm, mem->slot);
@@ -760,6 +778,26 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
return r;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
u32 kvm_nr_mmu_pages)
{
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;

mutex_lock(&kvm->lock);

kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;

mutex_unlock(&kvm->lock);
return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
return kvm->n_alloc_mmu_pages;
}

/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@@ -3071,6 +3109,14 @@ static long kvm_vm_ioctl(struct file *filp,
goto out;
break;
}
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
if (r)
goto out;
break;
case KVM_GET_NR_MMU_PAGES:
r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
break;
case KVM_GET_DIRTY_LOG: {
struct kvm_dirty_log log;

@@ -3278,6 +3324,7 @@ static long kvm_dev_ioctl(struct file *filp,
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_HLT:
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
r = 1;
break;
default:
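To make the automatic sizing in kvm_vm_ioctl_set_memory_region above concrete: with KVM_PERMILLE_MMU_PAGES = 20, each memory slot contributes 2% of its guest pages to the shadow-page budget. Registering a 1 GB slot (262144 pages of 4 KB) grows n_alloc_mmu_pages by 262144 * 20 / 1000 = 5242 pages, and deleting that slot shrinks the budget by the same amount, clamped from below to KVM_MIN_ALLOC_MMU_PAGES = 64. Once userspace calls KVM_SET_NR_MMU_PAGES, n_requested_mmu_pages becomes nonzero and this automatic adjustment is bypassed.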
40 changes: 38 additions & 2 deletions drivers/kvm/mmu.c
@@ -747,6 +747,40 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
/*
 * If we set the number of mmu pages to be smaller than the
 * number of active pages, we must free some mmu pages before we
 * change the value.
 */

if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
kvm_nr_mmu_pages) {
int n_used_mmu_pages = kvm->n_alloc_mmu_pages
- kvm->n_free_mmu_pages;

while (n_used_mmu_pages > kvm_nr_mmu_pages) {
struct kvm_mmu_page *page;

page = container_of(kvm->active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_zap_page(kvm, page);
n_used_mmu_pages--;
}
kvm->n_free_mmu_pages = 0;
} else
kvm->n_free_mmu_pages += kvm_nr_mmu_pages
- kvm->n_alloc_mmu_pages;

kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
unsigned index;
@@ -1297,8 +1331,10 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)

ASSERT(vcpu);

vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;

if (vcpu->kvm->n_requested_mmu_pages)
vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
else
vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
* Therefore we need to allocate shadow page tables in the first
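A note on the bookkeeping in kvm_mmu_change_mmu_pages() above: the function preserves the invariant

	n_alloc_mmu_pages == n_free_mmu_pages + n_used_mmu_pages

where the used count is the number of shadow pages currently on active_mmu_pages. Growing the cache simply credits the difference to the free count; shrinking first zaps pages from the tail of active_mmu_pages (the oldest entries) until the used count fits under the new ceiling, then leaves the free count at zero.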
3 changes: 3 additions & 0 deletions include/linux/kvm.h
@@ -347,11 +347,14 @@ struct kvm_signal_mask {
*/
#define KVM_CAP_IRQCHIP 0
#define KVM_CAP_HLT 1
#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2

/*
* ioctls for VM fds
*/
#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
