Commit bea1ad6
---
yaml
---
r: 215754
b: refs/heads/master
c: 49d5ca2
h: refs/heads/master
v: v3
Dave Hansen authored and Avi Kivity committed Oct 24, 2010
1 parent cf62172 commit bea1ad6
Showing 4 changed files with 13 additions and 21 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 39de71ec5397f374aed95e99509372d605e1407c
+refs/heads/master: 49d5ca26636cb8feb05aff92fc4dba3e494ec683
2 changes: 1 addition & 1 deletion trunk/arch/x86/include/asm/kvm_host.h
@@ -367,7 +367,7 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_arch {
-        unsigned int n_free_mmu_pages;
+        unsigned int n_used_mmu_pages;
         unsigned int n_requested_mmu_pages;
         unsigned int n_max_mmu_pages;
         atomic_t invlpg_counter;
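
The header change above is the heart of the commit: kvm_arch stops tracking how many shadow pages are free and instead tracks how many are in use, leaving the free count as a derived quantity. A minimal sketch of that invariant, using the field names from this diff but a deliberately simplified, hypothetical stand-in struct (not the real struct kvm_arch):

#include <assert.h>

/* Hypothetical, stripped-down stand-in for struct kvm_arch; only the
 * counters relevant to this commit are kept. */
struct arch_counters {
        unsigned int n_used_mmu_pages;  /* replaces n_free_mmu_pages */
        unsigned int n_max_mmu_pages;
};

int main(void)
{
        struct arch_counters arch = { .n_used_mmu_pages = 0,
                                      .n_max_mmu_pages = 4 };

        arch.n_used_mmu_pages++;        /* a shadow page is allocated */
        arch.n_used_mmu_pages++;        /* ...and another one */

        /* The old n_free_mmu_pages is now computed on demand. */
        assert(arch.n_max_mmu_pages - arch.n_used_mmu_pages == 2);
        return 0;
}
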
27 changes: 9 additions & 18 deletions trunk/arch/x86/kvm/mmu.c
@@ -980,7 +980,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
         if (!sp->role.direct)
                 __free_page(virt_to_page(sp->gfns));
         kmem_cache_free(mmu_page_header_cache, sp);
-        ++kvm->arch.n_free_mmu_pages;
+        --kvm->arch.n_used_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -1003,7 +1003,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
         sp->multimapped = 0;
         sp->parent_pte = parent_pte;
-        --vcpu->kvm->arch.n_free_mmu_pages;
+        ++vcpu->kvm->arch.n_used_mmu_pages;
         return sp;
 }
 
@@ -1689,41 +1689,32 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 /*
  * Changing the number of mmu pages allocated to the vm
- * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
+ * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
-        int used_pages;
         LIST_HEAD(invalid_list);
 
-        used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
-        used_pages = max(0, used_pages);
-
         /*
          * If we set the number of mmu pages to be smaller be than the
          * number of actived pages , we must to free some mmu pages before we
          * change the value
          */
-
-        if (used_pages > kvm_nr_mmu_pages) {
-                while (used_pages > kvm_nr_mmu_pages &&
+        if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
+                while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
                         !list_empty(&kvm->arch.active_mmu_pages)) {
                         struct kvm_mmu_page *page;
 
                         page = container_of(kvm->arch.active_mmu_pages.prev,
                                             struct kvm_mmu_page, link);
-                        used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
+                        kvm_mmu_prepare_zap_page(kvm, page,
                                                  &invalid_list);
                 }
                 kvm_mmu_commit_zap_page(kvm, &invalid_list);
-                kvm_nr_mmu_pages = used_pages;
-                kvm->arch.n_free_mmu_pages = 0;
+                goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
         }
-        else
-                kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-                                         - kvm->arch.n_max_mmu_pages;
 
-        kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
+        kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
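
Stripped of locking and list handling, the rewritten kvm_mmu_change_mmu_pages() above compares the live n_used_mmu_pages counter directly against the requested goal instead of recomputing a local used_pages value. A hedged, user-space sketch of that control flow follows; prepare_zap_one() is a hypothetical stub standing in for kvm_mmu_prepare_zap_page()/kvm_mmu_commit_zap_page(), not a real KVM function:

#include <stdio.h>

/* Hypothetical stand-ins for the two kvm->arch counters this commit keeps. */
static unsigned int n_used_mmu_pages;
static unsigned int n_max_mmu_pages;

/* Hypothetical stub: drops one shadow page from a pretend active list
 * and updates the used counter. */
static int prepare_zap_one(void)
{
        if (n_used_mmu_pages == 0)
                return 0;               /* "active list" is empty */
        n_used_mmu_pages--;
        return 1;
}

static void change_mmu_pages(unsigned int goal_nr_mmu_pages)
{
        if (n_used_mmu_pages > goal_nr_mmu_pages) {
                /* Shrink: zap pages until the goal is met or the list runs out. */
                while (n_used_mmu_pages > goal_nr_mmu_pages &&
                       prepare_zap_one())
                        ;
                /* Settle for whatever usage level was actually reached. */
                goal_nr_mmu_pages = n_used_mmu_pages;
        }
        n_max_mmu_pages = goal_nr_mmu_pages;
}

int main(void)
{
        n_used_mmu_pages = 6;
        n_max_mmu_pages = 8;
        change_mmu_pages(3);            /* ask for a smaller cap */
        printf("max=%u used=%u\n", n_max_mmu_pages, n_used_mmu_pages);
        return 0;
}
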
3 changes: 2 additions & 1 deletion trunk/arch/x86/kvm/mmu.h
@@ -52,7 +52,8 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
-        return kvm->arch.n_free_mmu_pages;
+        return kvm->arch.n_max_mmu_pages -
+                kvm->arch.n_used_mmu_pages;
 }
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
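
The mmu.h hunk is what keeps existing callers working: kvm_mmu_available_pages() now derives the old free count from the two remaining counters. A small sketch, under the assumption that both bookkeeping schemes start from the same n_max_mmu_pages, showing that the derived value matches what the retired n_free_mmu_pages counter would have held:

#include <assert.h>

/* Old scheme: n_free_mmu_pages maintained directly.
 * New scheme: n_used_mmu_pages maintained; "free" derived as max - used. */
int main(void)
{
        unsigned int max = 10;
        unsigned int old_free = max;    /* old: starts at max, counted down */
        unsigned int new_used = 0;      /* new: starts at zero, counted up */

        for (int i = 0; i < 4; i++) {   /* allocate four shadow pages */
                old_free--;
                new_used++;
        }
        old_free++;                     /* free one shadow page */
        new_used--;

        /* kvm_mmu_available_pages() now reports max - used; callers see
         * the same number the old counter would have held. */
        assert(max - new_used == old_free);
        return 0;
}
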
