Commit 4024900
---
r: 202322
b: refs/heads/master
c: f41d335
h: refs/heads/master
v: v3
Xiao Guangrong authored and Avi Kivity committed Aug 1, 2010
1 parent 838cb15 commit 4024900
Showing 2 changed files with 24 additions and 29 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d98ba053656c033180781007241f2c9d54606d56
+refs/heads/master: f41d335a02d5132c14ec0459d3b2790eeb16fb11
trunk/arch/x86/kvm/mmu.c: 51 changes (23 additions, 28 deletions)
@@ -1205,13 +1205,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                     struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos, n)                           \
-  hlist_for_each_entry_safe(sp, pos, n,                                 \
+#define for_each_gfn_sp(kvm, sp, gfn, pos)                              \
+  hlist_for_each_entry(sp, pos,                                         \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)    \
         if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n)            \
-  hlist_for_each_entry_safe(sp, pos, n,                                 \
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)               \
+  hlist_for_each_entry(sp, pos,                                         \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)    \
         if ((sp)->gfn != (gfn) || (sp)->role.direct ||                  \
                 (sp)->role.invalid) {} else
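This hunk is the heart of the change. hlist_for_each_entry_safe(tpos, pos, n, head, member) caches the successor in n before the loop body runs, so the body may free the node it is standing on; plain hlist_for_each_entry(tpos, pos, head, member) reads pos->next only after the body, so it is legal only when the body leaves the current node linked and allocated. (The trailing "if (...) {} else" in each macro is the usual trick for filtering chain entries while still letting the caller attach a loop body.) A userspace toy with hypothetical names, omitting the kernel's hlist_entry() plumbing, shows the difference:

#include <stdio.h>
#include <stdlib.h>

struct hnode { int val; struct hnode *next; };

/* Plain walk: pos->next is read only after the body runs, so the body
 * must leave the current node linked and allocated. */
#define hlist_walk(pos, head) \
        for ((pos) = (head); (pos); (pos) = (pos)->next)

/* "_safe" walk: the successor is cached in n before the body runs, so
 * the body may free pos itself. */
#define hlist_walk_safe(pos, n, head) \
        for ((pos) = (head); (pos) && (((n) = (pos)->next), 1); (pos) = (n))

int main(void)
{
        struct hnode *head = NULL, *pos, *n;

        for (int i = 0; i < 4; i++) {
                struct hnode *e = malloc(sizeof(*e));
                e->val = i;
                e->next = head;
                head = e;
        }

        /* With hlist_walk this loop would read pos->next after free(pos),
         * a use-after-free; the cached successor makes it legal. */
        hlist_walk_safe(pos, n, head)
                free(pos);
        head = NULL;

        printf("freed every node without touching freed memory\n");
        return 0;
}

Note that even the _safe form only protects the node the cursor is on; if the loop body can free other entries on the same chain, including the cached successor, iteration has to restart from the head, which is exactly what the goto restart pattern removed below used to do.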
@@ -1265,11 +1265,11 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
         struct kvm_mmu_page *s;
-        struct hlist_node *node, *n;
+        struct hlist_node *node;
         LIST_HEAD(invalid_list);
         bool flush = false;
 
-        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
                 if (!s->unsync)
                         continue;
@@ -1387,7 +1387,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         union kvm_mmu_page_role role;
         unsigned quadrant;
         struct kvm_mmu_page *sp;
-        struct hlist_node *node, *tmp;
+        struct hlist_node *node;
         bool need_sync = false;
 
         role = vcpu->arch.mmu.base_role;
@@ -1401,7 +1401,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                 role.quadrant = quadrant;
         }
-        for_each_gfn_sp(vcpu->kvm, sp, gfn, node, tmp) {
+        for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
                 if (!need_sync && sp->unsync)
                         need_sync = true;
@@ -1656,19 +1656,18 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
         struct kvm_mmu_page *sp;
-        struct hlist_node *node, *n;
+        struct hlist_node *node;
         LIST_HEAD(invalid_list);
         int r;
 
         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
         r = 0;
-restart:
-        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, n) {
+
+        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
                 pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
                          sp->role.word);
                 r = 1;
-                if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-                        goto restart;
+                kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
         }
         kvm_mmu_commit_zap_page(kvm, &invalid_list);
         return r;
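The restart:/goto restart pairs can go because the zap is split in two: kvm_mmu_prepare_zap_page() accumulates victims on invalid_list without deleting them from the gfn hash chain, and kvm_mmu_commit_zap_page() performs the unlinking, the remote TLB flush and the freeing in one batch once the walk has finished. Since the chain is never edited mid-loop, a plain iteration can neither land on freed memory nor lose its place. A standalone sketch of that prepare/commit shape (hypothetical names; plain singly linked lists stand in for the kernel's hlist):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct kvm_mmu_page: one link for the hash
 * chain, one for the invalid list. */
struct sp {
        unsigned long gfn;
        struct sp *next;      /* hash-chain link */
        struct sp *inv_next;  /* invalid-list link */
};

/* "prepare": queue the page for destruction but leave the chain intact,
 * so a walk that is still in progress stays valid. */
static void prepare_zap(struct sp *s, struct sp **invalid_list)
{
        s->inv_next = *invalid_list;
        *invalid_list = s;
}

/* "commit": once the walk is over, unlink and free everything queued. */
static void commit_zap(struct sp **chain, struct sp **invalid_list)
{
        for (struct sp *s = *invalid_list; s; s = s->inv_next)
                for (struct sp **pp = chain; *pp; pp = &(*pp)->next)
                        if (*pp == s) {
                                *pp = s->next;
                                break;
                        }

        struct sp *s = *invalid_list;
        while (s) {
                struct sp *gone = s;
                s = s->inv_next;
                free(gone);
        }
        *invalid_list = NULL;
}

int main(void)
{
        struct sp *chain = NULL, *invalid_list = NULL;

        for (unsigned long gfn = 0; gfn < 4; gfn++) {
                struct sp *s = calloc(1, sizeof(*s));
                s->gfn = gfn;
                s->next = chain;
                chain = s;
        }

        /* A plain, non-restarting walk: nothing is unlinked mid-loop. */
        for (struct sp *s = chain; s; s = s->next)
                if (s->gfn & 1)
                        prepare_zap(s, &invalid_list);

        commit_zap(&chain, &invalid_list);

        for (struct sp *s = chain; s; s = s->next)
                printf("kept gfn %lu\n", s->gfn);
        return 0;
}

The loop in main() needs neither a cached next pointer nor a restart, which mirrors why kvm_mmu_unprotect_page() above can now call kvm_mmu_prepare_zap_page() unconditionally and simply fall through to the commit.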
@@ -1677,15 +1676,13 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
         struct kvm_mmu_page *sp;
-        struct hlist_node *node, *nn;
+        struct hlist_node *node;
         LIST_HEAD(invalid_list);
 
-restart:
-        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, nn) {
+        for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
                 pgprintk("%s: zap %lx %x\n",
                          __func__, gfn, sp->role.word);
-                if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-                        goto restart;
+                kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
         }
         kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
@@ -1830,9 +1827,9 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
         struct kvm_mmu_page *s;
-        struct hlist_node *node, *n;
+        struct hlist_node *node;
 
-        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
                 if (s->unsync)
                         continue;
                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -1844,10 +1841,10 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                                   bool can_unsync)
 {
         struct kvm_mmu_page *s;
-        struct hlist_node *node, *n;
+        struct hlist_node *node;
         bool need_unsync = false;
 
-        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
                 if (s->role.level != PT_PAGE_TABLE_LEVEL)
                         return 1;
@@ -2724,7 +2721,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
         gfn_t gfn = gpa >> PAGE_SHIFT;
         struct kvm_mmu_page *sp;
-        struct hlist_node *node, *n;
+        struct hlist_node *node;
         LIST_HEAD(invalid_list);
         u64 entry, gentry;
         u64 *spte;
@@ -2794,8 +2791,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                 }
         }
 
-restart:
-        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node, n) {
+        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
                 pte_size = sp->role.cr4_pae ? 8 : 4;
                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                 misaligned |= bytes < 4;
@@ -2812,9 +2808,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                  */
                 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                          gpa, bytes, sp->role.word);
-                if (kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-                                             &invalid_list))
-                        goto restart;
+                kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+                                         &invalid_list);
                 ++vcpu->kvm->stat.mmu_flooded;
                 continue;
         }
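One context line in the final hunk rewards a closer look: misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1) XORs the offsets of the first and last byte written, leaving high bits set exactly when the two bytes fall into different pte_size-aligned slots, i.e. when a guest write straddles a page-table-entry boundary (the next line additionally treats writes shorter than 4 bytes as misaligned). A quick check of the expression under those assumed semantics:

#include <stdio.h>

int main(void)
{
        unsigned pte_size = 8;  /* sp->role.cr4_pae ? 8 : 4 */
        struct { unsigned offset, bytes; } w[] = {
                { 0, 8 },  /* covers exactly one entry  */
                { 4, 8 },  /* straddles entries 0 and 1 */
                { 6, 4 },  /* crosses the boundary at 8 */
        };

        for (unsigned i = 0; i < sizeof(w) / sizeof(w[0]); i++) {
                unsigned off = w[i].offset, bytes = w[i].bytes;
                unsigned misaligned =
                        (off ^ (off + bytes - 1)) & ~(pte_size - 1);
                printf("offset %u bytes %u -> %s\n", off, bytes,
                       misaligned ? "misaligned" : "aligned");
        }
        return 0;
}

This prints aligned, misaligned, misaligned for the three sample writes, matching the boundary-crossing reading of the test.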
