
Commit a2a38cd
Commit message (git mirror metadata):

```yaml
---
r: 215755
b: refs/heads/master
c: 45221ab
h: refs/heads/master
i:
  215753: cf62172
  215751: 7ece299
v: v3
```
Dave Hansen authored and Avi Kivity committed Oct 24, 2010
1 parent bea1ad6 commit a2a38cd
Showing 2 changed files with 25 additions and 11 deletions.
2 changes: 1 addition & 1 deletion [refs]
```diff
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 49d5ca26636cb8feb05aff92fc4dba3e494ec683
+refs/heads/master: 45221ab6684a82a5b60208b76d6f8bfb1bbcb969
```
34 changes: 24 additions & 10 deletions trunk/arch/x86/kvm/mmu.c
```diff
@@ -178,6 +178,7 @@ typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
+static struct percpu_counter kvm_total_used_mmu_pages;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
```
```diff
@@ -971,6 +972,18 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
+/*
+ * This value is the sum of all of the kvm instances's
+ * kvm->arch.n_used_mmu_pages values. We need a global,
+ * aggregate version in order to make the slab shrinker
+ * faster
+ */
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+{
+	kvm->arch.n_used_mmu_pages += nr;
+	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
+}
+
 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	ASSERT(is_empty_shadow_page(sp->spt));
@@ -980,7 +993,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->role.direct)
 		__free_page(virt_to_page(sp->gfns));
 	kmem_cache_free(mmu_page_header_cache, sp);
-	--kvm->arch.n_used_mmu_pages;
+	kvm_mod_used_mmu_pages(kvm, -1);
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -1003,7 +1016,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
 	sp->parent_pte = parent_pte;
-	++vcpu->kvm->arch.n_used_mmu_pages;
+	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 	return sp;
 }
```
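The hunk above is the heart of the patch: each VM keeps its exact count in kvm->arch.n_used_mmu_pages, while the new helper also folds every change into a global percpu_counter so the total can be read cheaply. A minimal sketch of this aggregate-counter pattern, with hypothetical names (struct vm_stats, total_used, mod_used) standing in for the KVM structures:

```c
#include <linux/percpu_counter.h>

/* Hypothetical per-VM bookkeeping, standing in for kvm->arch. */
struct vm_stats {
	unsigned int n_used;
};

/* Global, approximate aggregate (the analogue of kvm_total_used_mmu_pages). */
static struct percpu_counter total_used;

static inline void mod_used(struct vm_stats *vm, int nr)
{
	vm->n_used += nr;			/* exact, per-VM value */
	percpu_counter_add(&total_used, nr);	/* cheap, mostly per-cpu update */
}

static s64 total_used_pages(void)
{
	/* No list walk and no per-VM locking: read the (possibly
	 * slightly stale) global sum instead. */
	return percpu_counter_read_positive(&total_used);
}
```

percpu_counter_read_positive() may lag the true sum, which is acceptable for a shrinker heuristic; the exact per-VM counts remain authoritative.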

```diff
@@ -3122,23 +3135,22 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	struct kvm *kvm;
 	struct kvm *kvm_freed = NULL;
-	int cache_count = 0;
+
+	if (nr_to_scan == 0)
+		goto out;
 
 	spin_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		int npages, idx, freed_pages;
+		int idx, freed_pages;
 		LIST_HEAD(invalid_list);
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
-		npages = kvm->arch.n_max_mmu_pages -
-			 kvm_mmu_available_pages(kvm);
-		cache_count += npages;
-		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+		if (!kvm_freed && nr_to_scan > 0 &&
+		    kvm->arch.n_used_mmu_pages > 0) {
 			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
 							  &invalid_list);
-			cache_count -= freed_pages;
 			kvm_freed = kvm;
 		}
 		nr_to_scan--;
```
```diff
@@ -3152,7 +3164,8 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 
 	spin_unlock(&kvm_lock);
 
-	return cache_count;
+out:
+	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
 static struct shrinker mmu_shrinker = {
```
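For context, the shrinker interface of this kernel generation uses a single callback, as the mmu_shrink signature above shows: the VM passes nr_to_scan == 0 to ask only for an object count, and the return value is that count. That is why the patch can short-circuit straight to the percpu_counter read instead of walking vm_list. A hedged sketch of that contract, with an illustrative counter standing in for a real object cache:

```c
#include <linux/mm.h>

static int example_count;		/* stands in for a real object count */

static int example_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	while (nr_to_scan-- > 0 && example_count > 0)
		example_count--;	/* stand-in for freeing one object */

	/* nr_to_scan == 0 on entry means the VM only wants a count,
	 * so this return path should be as cheap as possible. */
	return example_count;
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,
};
```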
```diff
@@ -3195,6 +3208,7 @@ int kvm_mmu_module_init(void)
 	if (!mmu_page_header_cache)
 		goto nomem;
 
+	percpu_counter_init(&kvm_total_used_mmu_pages, 0);
 	register_shrinker(&mmu_shrinker);
 
 	return 0;
```
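Finally, the counter is initialized once at module load, before the shrinker is registered. Note that percpu_counter_init() allocates per-CPU storage and returns an error code, which the hunk above does not check, and a matching percpu_counter_destroy() is not part of this diff. A minimal lifecycle sketch under those assumptions, with hypothetical names:

```c
#include <linux/module.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_counter;

static int __init example_init(void)
{
	/* Allocates per-CPU storage; can fail, so propagate the error. */
	return percpu_counter_init(&example_counter, 0);
}

static void __exit example_exit(void)
{
	/* Release the per-CPU storage allocated at init time. */
	percpu_counter_destroy(&example_counter);
}

module_init(example_init);
module_exit(example_exit);
```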
