KVM: MMU: allow more pages to become unsync at shadow page get time
Allow more pages to become unsync at shadow page (sp) get time. If we
need to create a new shadow page for a gfn at a level that does not
allow unsync pages (level > 1), we should sync all of that gfn's unsync
pages instead.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Xiao Guangrong authored and Avi Kivity committed Aug 1, 2010
1 parent 9cf5cf5 commit 9f1a122
Showing 1 changed file with 37 additions and 10 deletions.
47 changes: 37 additions & 10 deletions arch/x86/kvm/mmu.c
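In outline, the behaviour this commit gives kvm_mmu_get_page() is the following. This is a simplified sketch of the diff below, not the literal kernel code: for_each_sp_for_gfn() and role_matches() are illustrative stand-ins, and parent-PTE tracking, statistics and the unsync-children walk are omitted.

/*
 * Simplified sketch of kvm_mmu_get_page() after this commit.
 * for_each_sp_for_gfn() and role_matches() are illustrative helpers,
 * not real kernel functions.
 */
static struct kvm_mmu_page *get_page_sketch(struct kvm_vcpu *vcpu, gfn_t gfn,
					    int level, bool direct,
					    union kvm_mmu_page_role role)
{
	struct kvm_mmu_page *sp;
	bool need_sync = false;		/* does this gfn have unsync pages? */

	for_each_sp_for_gfn(sp, vcpu->kvm, gfn) {
		if (sp->unsync)
			need_sync = true;	/* remember it, keep looking */

		if (!role_matches(sp, role))
			continue;

		/* Reuse an existing page; sync just this one if it is stale. */
		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
			break;		/* page was zapped, fall back to alloc */
		return sp;
	}

	/* No match: allocate a fresh shadow page for this role. */
	sp = kvm_mmu_alloc_page(vcpu, NULL);	/* parent_pte handling omitted */
	if (!direct) {
		if (rmap_write_protect(vcpu->kvm, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);
		/*
		 * A level > 1 page means the gfn is now write-protected, so
		 * every remaining unsync page for it must be brought back in
		 * sync -- this is what the new kvm_sync_pages() does.
		 */
		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
			kvm_sync_pages(vcpu, gfn);
	}
	return sp;
}

The point of the change is that an existing unsync page no longer forces a sync on every lookup; syncing is deferred until a level > 1 shadow page actually requires the gfn to be write-protected.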
@@ -1220,6 +1220,35 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	return __kvm_sync_page(vcpu, sp, true);
 }
 
+/* @gfn should be write-protected at the call site */
+static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	struct hlist_head *bucket;
+	struct kvm_mmu_page *s;
+	struct hlist_node *node, *n;
+	unsigned index;
+	bool flush = false;
+
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+		if (s->gfn != gfn || !s->unsync || s->role.invalid)
+			continue;
+
+		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
+			kvm_mmu_zap_page(vcpu->kvm, s);
+			continue;
+		}
+		kvm_unlink_unsync_page(vcpu->kvm, s);
+		flush = true;
+	}
+
+	if (flush)
+		kvm_mmu_flush_tlb(vcpu);
+}
+
 struct mmu_page_path {
 	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
 	unsigned int idx[PT64_ROOT_LEVEL-1];
@@ -1318,8 +1347,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned index;
 	unsigned quadrant;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *sp, *unsync_sp = NULL;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *tmp;
+	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
 	role.level = level;
@@ -1336,17 +1366,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
 		if (sp->gfn == gfn) {
-			if (sp->unsync)
-				unsync_sp = sp;
+			if (!need_sync && sp->unsync)
+				need_sync = true;
 
 			if (sp->role.word != role.word)
 				continue;
 
-			if (!direct && unsync_sp &&
-			      kvm_sync_page_transient(vcpu, unsync_sp)) {
-				unsync_sp = NULL;
+			if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
 				break;
-			}
 
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			if (sp->unsync_children) {
@@ -1358,9 +1385,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
-	if (!direct && unsync_sp)
-		kvm_sync_page(vcpu, unsync_sp);
-
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
@@ -1371,6 +1395,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
+		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+			kvm_sync_pages(vcpu, gfn);
+
 		account_shadowed(vcpu->kvm, gfn);
 	}
 	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
