Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 202297
b: refs/heads/master
c: 9cf5cf5
h: refs/heads/master
i:
  202295: b982e06
v: v3
  • Loading branch information
Xiao Guangrong authored and Avi Kivity committed Aug 1, 2010
1 parent 7a83da4 commit fe0aceb
Show file tree
Hide file tree
Showing 2 changed files with 39 additions and 45 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 221d059d15f1c8bd070a63fd45cd8d2598af5f99
refs/heads/master: 9cf5cf5ad43b293581e5b87678ea5783c06d1a41
82 changes: 38 additions & 44 deletions trunk/arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -1170,26 +1170,6 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
return __mmu_unsync_walk(sp, pvec);
}

/*
 * Find the shadow page that maps @gfn as a guest page table.
 *
 * Walks the kvm->arch.mmu_page_hash bucket selected by
 * kvm_page_table_hashfn(gfn) and returns the first shadow page whose
 * gfn matches and that is neither direct-mapped (role.direct — no gfn
 * translation to look up) nor marked role.invalid (being torn down).
 * Returns NULL if no such shadow page exists.
 *
 * NOTE(review): pgprintk uses "%lx" for @gfn — assumes gfn_t is
 * unsigned long on this config; confirm on builds where gfn_t is u64.
 */
static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
unsigned index;
struct hlist_head *bucket;
struct kvm_mmu_page *sp;
struct hlist_node *node;

pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
/* hash the gfn to pick a bucket in the global shadow-page hash */
index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
/* first match wins; invalid and direct pages are skipped */
hlist_for_each_entry(sp, node, bucket, hash_link)
if (sp->gfn == gfn && !sp->role.direct
&& !sp->role.invalid) {
pgprintk("%s: found role %x\n",
__func__, sp->role.word);
return sp;
}
return NULL;
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!sp->unsync);
Expand Down Expand Up @@ -1759,47 +1739,61 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);

static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
trace_kvm_mmu_unsync_page(sp);
++vcpu->kvm->stat.mmu_unsync;
sp->unsync = 1;

kvm_mmu_mark_parents_unsync(sp);
mmu_convert_notrap(sp);
}

static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
unsigned index;
struct hlist_head *bucket;
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
unsigned index;

index = kvm_page_table_hashfn(sp->gfn);
index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
/* don't unsync if pagetable is shadowed with multiple roles */

hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
if (s->gfn != sp->gfn || s->role.direct)
if (s->gfn != gfn || s->role.direct || s->unsync ||
s->role.invalid)
continue;
if (s->role.word != sp->role.word)
return 1;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
__kvm_unsync_page(vcpu, s);
}
trace_kvm_mmu_unsync_page(sp);
++vcpu->kvm->stat.mmu_unsync;
sp->unsync = 1;

kvm_mmu_mark_parents_unsync(sp);

mmu_convert_notrap(sp);
return 0;
}

/*
 * Decide whether a gfn being mapped writable must stay write-protected.
 *
 * Returns 1 (must write-protect) when @gfn is shadowed at a level
 * above PT_PAGE_TABLE_LEVEL (such pages cannot be left unsync), or
 * when a sync shadow of it exists but unsyncing is not allowed
 * (!@can_unsync or the oos_shadow module parameter is off).
 * Returns 0 when the gfn is not shadowed, is already fully unsync,
 * or after successfully unsyncing all its shadow pages.
 *
 * Unlike the old single-lookup version, this walks the whole hash
 * bucket so that *all* shadow pages of the gfn are considered, and
 * defers the actual unsync (kvm_unsync_pages) until the entire bucket
 * has been validated.
 */
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *s;
	struct hlist_node *node, *n;
	bool need_unsync = false;

	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
		if (s->gfn != gfn || s->role.direct || s->role.invalid)
			continue;

		/* a higher-level shadow can never be left unsync */
		if (s->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;

		if (!need_unsync && !s->unsync) {
			/* a sync shadow exists; bail if we may not unsync */
			if (!can_unsync || !oos_shadow)
				return 1;
			need_unsync = true;
		}
	}
	if (need_unsync)
		kvm_unsync_pages(vcpu, gfn);
	return 0;
}

Expand Down

0 comments on commit fe0aceb

Please sign in to comment.