KVM: MMU: fold tlb flush judgement into mmu_spte_update
mmu_spte_update() is the common function, so we can easily audit the path

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Xiao Guangrong authored and Avi Kivity committed Jul 11, 2012
1 parent 4f5982a commit 6e7d035
Showing 1 changed file with 20 additions and 13 deletions.
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -479,15 +479,24 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
 
 /* Rules for using mmu_spte_update:
  * Update the state bits, it means the mapped pfn is not changged.
+ *
+ * Whenever we overwrite a writable spte with a read-only one we
+ * should flush remote TLBs. Otherwise rmap_write_protect
+ * will find a read-only spte, even though the writable spte
+ * might be cached on a CPU's TLB, the return value indicates this
+ * case.
  */
-static void mmu_spte_update(u64 *sptep, u64 new_spte)
+static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 {
 	u64 mask, old_spte = *sptep;
+	bool ret = false;
 
 	WARN_ON(!is_rmap_spte(new_spte));
 
-	if (!is_shadow_present_pte(old_spte))
-		return mmu_spte_set(sptep, new_spte);
+	if (!is_shadow_present_pte(old_spte)) {
+		mmu_spte_set(sptep, new_spte);
+		return ret;
+	}
 
 	new_spte |= old_spte & shadow_dirty_mask;
 
@@ -500,13 +509,18 @@ static void mmu_spte_update(u64 *sptep, u64 new_spte)
 	else
 		old_spte = __update_clear_spte_slow(sptep, new_spte);
 
+	if (is_writable_pte(old_spte) && !is_writable_pte(new_spte))
+		ret = true;
+
 	if (!shadow_accessed_mask)
-		return;
+		return ret;
 
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
+
+	return ret;
 }
 
 /*
@@ -2268,7 +2282,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
-	u64 spte, entry = *sptep;
+	u64 spte;
 	int ret = 0;
 
 	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
@@ -2346,14 +2360,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	mmu_spte_update(sptep, spte);
-	/*
-	 * If we overwrite a writable spte with a read-only one we
-	 * should flush remote TLBs. Otherwise rmap_write_protect
-	 * will find a read-only spte, even though the writable spte
-	 * might be cached on a CPU's TLB.
-	 */
-	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
+	if (mmu_spte_update(sptep, spte))
 		kvm_flush_remote_tlbs(vcpu->kvm);
 done:
 	return ret;
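
For context, a minimal sketch of the calling convention this commit establishes. The helper name write_protect_spte is hypothetical and not part of the commit; mmu_spte_update(), PT_WRITABLE_MASK and kvm_flush_remote_tlbs() are the real symbols touched or used in this file. The point is that callers no longer snapshot the old spte and compare writability themselves, as set_spte() did before this patch; mmu_spte_update() returns true exactly when a writable spte was replaced by a read-only one, i.e. when a stale writable translation may still sit in a remote CPU's TLB.

	/* Hypothetical caller, sketching the post-commit pattern. */
	static void write_protect_spte(struct kvm *kvm, u64 *sptep)
	{
		u64 new_spte = *sptep & ~PT_WRITABLE_MASK;	/* drop the write bit */

		/* Flush only on a writable -> read-only transition. */
		if (mmu_spte_update(sptep, new_spte))
			kvm_flush_remote_tlbs(kvm);
	}

Funnelling every spte rewrite through one function that knows when a flush is required is what makes the paths easy to audit, since rmap_write_protect could otherwise find a read-only spte while a writable translation is still cached.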
