
Commit 4132779
KVM: MMU: mark page dirty only when page is really written
Mark the page dirty only when it is really written; this is more exact, and it
also fixes dirty-page marking in the speculative path.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Xiao Guangrong authored and Avi Kivity committed Oct 24, 2010
1 parent 8672b72 · commit 4132779
Showing 1 changed file with 28 additions and 19 deletions.
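
The core of the patch is a new predicate, spte_is_bit_cleared(), which reports
whether an SPTE update cleared a hardware-maintained accessed or dirty bit; the
pfn is notified only when that really happened. Below is a minimal standalone
sketch of the predicate's behavior. The bit positions are illustrative
stand-ins for the kernel's shadow_accessed_mask/shadow_dirty_mask (which may be
zero when the hardware does not maintain those bits), and the main() driver is
editorial, for demonstration only:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative bit positions; the kernel configures the real masks
	 * at setup time, and they are zero when untracked by hardware. */
	#define ACCESSED_BIT (1ULL << 5)
	#define DIRTY_BIT    (1ULL << 6)

	/* The predicate the patch introduces: the bit was set in the old
	 * SPTE and the update cleared it. */
	static bool spte_is_bit_cleared(uint64_t old_spte, uint64_t new_spte,
					uint64_t bit_mask)
	{
		return (old_spte & bit_mask) && !(new_spte & bit_mask);
	}

	int main(void)
	{
		/* CPU set the dirty bit, the update cleared it: the page
		 * was really written, so report it. */
		printf("%d\n", spte_is_bit_cleared(DIRTY_BIT, 0, DIRTY_BIT)); /* 1 */
		/* Dirty bit was never set: the page was not written. */
		printf("%d\n", spte_is_bit_cleared(0, 0, DIRTY_BIT)); /* 0 */
		/* Dirty bit carried over into the new SPTE: nothing to
		 * report yet. */
		printf("%d\n", spte_is_bit_cleared(DIRTY_BIT, DIRTY_BIT,
						   DIRTY_BIT)); /* 0 */
		return 0;
	}
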
arch/x86/kvm/mmu.c: 47 changes (28 additions, 19 deletions)
@@ -307,24 +307,42 @@ static bool spte_has_volatile_bits(u64 spte)
 	if (!is_shadow_present_pte(spte))
 		return false;
 
-	if (spte & shadow_accessed_mask)
+	if ((spte & shadow_accessed_mask) &&
+	      (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
 		return false;
 
 	return true;
 }
 
+static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+	return (old_spte & bit_mask) && !(new_spte & bit_mask);
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
-	u64 old_spte;
+	u64 mask, old_spte = *sptep;
+
+	WARN_ON(!is_rmap_spte(new_spte));
 
-	if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask) ||
-	      !is_rmap_spte(*sptep))
+	new_spte |= old_spte & shadow_dirty_mask;
+
+	mask = shadow_accessed_mask;
+	if (is_writable_pte(old_spte))
+		mask |= shadow_dirty_mask;
+
+	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
 		__set_spte(sptep, new_spte);
-	else {
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
-		if (old_spte & shadow_accessed_mask)
-			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
-	}
+
+	if (!shadow_accessed_mask)
+		return;
+
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
+		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
+		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
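
For readability, here is the resulting update_spte() once more with
explanatory comments added; the comments are editorial glosses on the hunk
above, not part of the commit:

	static void update_spte(u64 *sptep, u64 new_spte)
	{
		u64 mask, old_spte = *sptep;

		WARN_ON(!is_rmap_spte(new_spte));

		/* Never lose a dirty bit the CPU has already set: carry it
		 * over into the new SPTE. */
		new_spte |= old_spte & shadow_dirty_mask;

		/* Collect the bits the CPU may still be setting behind our
		 * back: accessed (when tracked), plus dirty while the old
		 * mapping was writable. */
		mask = shadow_accessed_mask;
		if (is_writable_pte(old_spte))
			mask |= shadow_dirty_mask;

		/* A plain write is safe when nothing volatile can be lost,
		 * or when new_spte already has every volatile bit set;
		 * otherwise an atomic exchange captures exactly the bits
		 * the CPU wrote. */
		if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
			__set_spte(sptep, new_spte);
		else
			old_spte = __xchg_spte(sptep, new_spte);

		/* Without hardware accessed/dirty tracking there is nothing
		 * to report. */
		if (!shadow_accessed_mask)
			return;

		/* Notify only for bits the update really cleared, i.e. only
		 * when the guest actually touched or wrote the page. */
		if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
		if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	}
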
@@ -704,7 +722,7 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(old_spte))
+	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
 }
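
set_spte_track_bits() now consults the hardware dirty bit instead of the
write-permission bit, with a conservative fallback when the bit is not tracked
at all. A hedged sketch of that pattern follows; pfn_was_written is a
hypothetical helper name used for illustration, not a function in the kernel:

	/* Hypothetical helper showing the patch's fallback pattern: when
	 * the hardware cannot track dirtiness (the mask is zero, e.g. EPT
	 * on CPUs of this era without A/D bits), conservatively treat the
	 * page as written; otherwise trust the hardware-set dirty bit. */
	static bool pfn_was_written(uint64_t old_spte, uint64_t shadow_dirty_mask)
	{
		return !shadow_dirty_mask || (old_spte & shadow_dirty_mask);
	}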

@@ -759,13 +777,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
-	if (write_protected) {
-		pfn_t pfn;
-
-		spte = rmap_next(kvm, rmapp, NULL);
-		pfn = spte_to_pfn(*spte);
-		kvm_set_pfn_dirty(pfn);
-	}
 
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
@@ -1938,7 +1949,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = shadow_base_present_pte | shadow_dirty_mask;
+	spte = shadow_base_present_pte;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 	if (!dirty)
@@ -1999,8 +2010,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	if (is_writable_pte(*sptep) && !is_writable_pte(spte))
-		kvm_set_pfn_dirty(pfn);
 	update_spte(sptep, spte);
 done:
 	return ret;
