Skip to content

Commit

Permalink
KVM: MMU: clear write-flooding on the fast path of tracked page
Browse files Browse the repository at this point in the history
If the page fault is caused by a write access to a write-tracked page, the
real shadow page walk is skipped, so we lose the chance to clear the write
flooding count for the page structures the current vcpu is using.

Fix it by locklessly walking the shadow page table outside of mmu-lock to
clear the write flooding count on the shadow page structures. To make this
safe, change the count to an atomic_t.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
  • Loading branch information
Xiao Guangrong authored and Paolo Bonzini committed Mar 3, 2016
1 parent 3d0c27a commit e5691a8
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 4 deletions.
2 changes: 1 addition & 1 deletion arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ struct kvm_mmu_page {
#endif

/* Number of writes since the last time traversal visited this page. */
int write_flooding_count;
atomic_t write_flooding_count;
};

struct kvm_pio_request {
Expand Down
22 changes: 20 additions & 2 deletions arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -2063,7 +2063,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,

/*
 * Reset the write-flooding counter of @sp.
 *
 * The counter is an atomic_t because it can now be cleared locklessly
 * (outside mmu-lock) on the fast path for write-tracked pages, while
 * also being updated from the write-protect path.
 */
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count, 0);
}

static void clear_sp_write_flooding_count(u64 *spte)
Expand Down Expand Up @@ -3406,6 +3406,23 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
return false;
}

/*
 * Clear the write-flooding count on every shadow page visited while
 * walking the shadow page table for @addr.
 *
 * Used on the fast path for a fault on a write-tracked gfn, where the
 * real shadow page walk is skipped and the counts would otherwise never
 * be reset.  The walk is lockless (mmu-lock is not held); this is safe
 * because write_flooding_count is an atomic_t.
 */
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	/* Nothing to walk without a valid root. */
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		clear_sp_write_flooding_count(iterator.sptep);
		/* Stop once the walk reaches a non-present entry. */
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
u32 error_code, bool prefault)
{
Expand Down Expand Up @@ -4221,7 +4238,8 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp)
if (sp->role.level == PT_PAGE_TABLE_LEVEL)
return false;

return ++sp->write_flooding_count >= 3;
atomic_inc(&sp->write_flooding_count);
return atomic_read(&sp->write_flooding_count) >= 3;
}

/*
Expand Down
4 changes: 3 additions & 1 deletion arch/x86/kvm/paging_tmpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -728,8 +728,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
return 0;
}

if (page_fault_handle_page_track(vcpu, error_code, walker.gfn))
if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
shadow_page_table_clear_flood(vcpu, addr);
return 1;
}

vcpu->arch.write_fault_to_shadow_pgtable = false;

Expand Down

0 comments on commit e5691a8

Please sign in to comment.