
Commit 96852f8
---
yaml
---
r: 372983
b: refs/heads/master
c: 5da5960
h: refs/heads/master
i:
  372981: edcc44b
  372979: b4e6131
  372975: c46be80
v: v3
Takuya Yoshikawa authored and Marcelo Tosatti committed Mar 7, 2013
1 parent ff52552 commit 96852f8
Showing 2 changed files with 24 additions and 33 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 945315b9dbbe102bb3393a34ea4a10fb2a5ff303
+refs/heads/master: 5da596078f915a62e39a20e582308eab91b88c9a
trunk/arch/x86/kvm/mmu.c (55 changes: 23 additions & 32 deletions)
@@ -2110,30 +2110,37 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	}
 }
 
+static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
+					struct list_head *invalid_list)
+{
+	struct kvm_mmu_page *sp;
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		return false;
+
+	sp = list_entry(kvm->arch.active_mmu_pages.prev,
+			struct kvm_mmu_page, link);
+	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+
+	return true;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);
-	/*
-	 * If we set the number of mmu pages to be smaller be than the
-	 * number of actived pages , we must to free some mmu pages before we
-	 * change the value
-	 */
 
 	spin_lock(&kvm->mmu_lock);
 
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
-		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
-			!list_empty(&kvm->arch.active_mmu_pages)) {
-			struct kvm_mmu_page *page;
+		/* Need to free some mmu pages to achieve the goal. */
+		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
+			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+				break;
 
-			page = container_of(kvm->arch.active_mmu_pages.prev,
-					    struct kvm_mmu_page, link);
-			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
-		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
@@ -4007,13 +4014,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	LIST_HEAD(invalid_list);
 
-	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
-	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-		struct kvm_mmu_page *sp;
+	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
+		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
+			break;
 
-		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
-				  struct kvm_mmu_page, link);
-		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -4182,19 +4186,6 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-						struct list_head *invalid_list)
-{
-	struct kvm_mmu_page *page;
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		return;
-
-	page = container_of(kvm->arch.active_mmu_pages.prev,
-			    struct kvm_mmu_page, link);
-	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
-}
-
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
@@ -4229,7 +4220,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 
-		kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
+		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 		spin_unlock(&kvm->mmu_lock);
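
Note on the pattern: the diff above collapses three open-coded copies of the same operation, evicting the entry at the tail of kvm->arch.active_mmu_pages (the oldest page, since new shadow pages are added at the head), into the single helper prepare_zap_oldest_mmu_page(), whose bool return value lets callers loop until a goal is reached or the list runs dry. The following standalone sketch illustrates that FIFO-eviction pattern outside the kernel. It uses a plain doubly linked list with a sentinel node, and all names (page_node, lru, zap_oldest, change_page_goal) are illustrative stand-ins, not the kernel's list_head API.

/*
 * Minimal sketch of the FIFO-zap pattern, under the assumptions above.
 * head.next is the newest entry, head.prev the oldest.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page_node {
	int id;
	struct page_node *prev, *next;
};

/* Self-referencing sentinel: an empty circular list. */
static struct page_node lru = { .id = -1, .prev = &lru, .next = &lru };
static unsigned int n_used;

/* Insert at the head, so the tail stays the oldest entry. */
static void add_newest(int id)
{
	struct page_node *p = malloc(sizeof(*p));

	p->id = id;
	p->next = lru.next;
	p->prev = &lru;
	lru.next->prev = p;
	lru.next = p;
	n_used++;
}

/* Analogue of prepare_zap_oldest_mmu_page(): evict one page from the tail. */
static bool zap_oldest(void)
{
	struct page_node *oldest = lru.prev;

	if (oldest == &lru)	/* list is empty, nothing to evict */
		return false;

	oldest->prev->next = &lru;
	lru.prev = oldest->prev;
	printf("zapped page %d\n", oldest->id);
	free(oldest);
	n_used--;
	return true;
}

/* Analogue of the kvm_mmu_change_mmu_pages() loop: shrink toward a goal. */
static void change_page_goal(unsigned int goal)
{
	while (n_used > goal)
		if (!zap_oldest())
			break;	/* list ran dry before the goal was reached */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		add_newest(i);

	change_page_goal(2);	/* evicts pages 0, 1, 2 in insertion order */
	printf("%u pages remain\n", n_used);
	return 0;
}

Run as written, the sketch zaps pages 0, 1, and 2 (oldest first) and leaves the two newest, mirroring how kvm_mmu_change_mmu_pages() trims n_used_mmu_pages down to goal_nr_mmu_pages and how the break on a false return replaces the old explicit list_empty() checks at each call site.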
