KVM: MMU: move bits lost judgement into a separate function
Introduce a spte_has_volatile_bits() function to judge whether spte
bits can be lost. It is more readable and will help us clean up the
code later.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Xiao Guangrong authored and Avi Kivity committed Oct 24, 2010
1 parent 251464c commit 8672b72
Showing 1 changed file with 17 additions and 3 deletions.
arch/x86/kvm/mmu.c (17 additions, 3 deletions)
@@ -299,6 +299,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
 #endif
 }
 
+static bool spte_has_volatile_bits(u64 spte)
+{
+        if (!shadow_accessed_mask)
+                return false;
+
+        if (!is_shadow_present_pte(spte))
+                return false;
+
+        if (spte & shadow_accessed_mask)
+                return false;
+
+        return true;
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
         u64 old_spte;
@@ -679,14 +693,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
         pfn_t pfn;
         u64 old_spte = *sptep;
 
-        if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
-              old_spte & shadow_accessed_mask) {
+        if (!spte_has_volatile_bits(old_spte))
                 __set_spte(sptep, new_spte);
-        } else
+        else
                 old_spte = __xchg_spte(sptep, new_spte);
 
         if (!is_rmap_spte(old_spte))
                 return;
 
         pfn = spte_to_pfn(old_spte);
         if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                 kvm_set_pfn_accessed(pfn);
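
For context, here is a minimal standalone sketch (not kernel code) of the equivalence this refactor relies on: spte_has_volatile_bits() is the logical negation of the old inline condition that selected the plain __set_spte() path in set_spte_track_bits(). The mask values and the is_shadow_present_pte() stub below are hypothetical stand-ins, not the kernel's definitions.

/*
 * Standalone sketch: checks that the old inline test and
 * !spte_has_volatile_bits() agree for a few sample spte patterns.
 * shadow_accessed_mask / shadow_present_mask values are assumed here
 * purely for illustration.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static const u64 shadow_present_mask  = 1ULL << 0;  /* assumed present bit */
static const u64 shadow_accessed_mask = 1ULL << 5;  /* assumed accessed bit */

static bool is_shadow_present_pte(u64 spte)
{
        return spte & shadow_present_mask;
}

/* New helper, as introduced by the commit. */
static bool spte_has_volatile_bits(u64 spte)
{
        if (!shadow_accessed_mask)
                return false;

        if (!is_shadow_present_pte(spte))
                return false;

        if (spte & shadow_accessed_mask)
                return false;

        return true;
}

/* Old inline condition that guarded the non-atomic __set_spte() path. */
static bool old_non_atomic_path(u64 spte)
{
        return !shadow_accessed_mask || !is_shadow_present_pte(spte) ||
               (spte & shadow_accessed_mask);
}

int main(void)
{
        /* The two forms must agree for every spte pattern we try. */
        u64 samples[] = {
                0,
                shadow_present_mask,
                shadow_present_mask | shadow_accessed_mask,
                shadow_accessed_mask,
        };
        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                assert(old_non_atomic_path(samples[i]) ==
                       !spte_has_volatile_bits(samples[i]));
        printf("old inline check == !spte_has_volatile_bits() for all samples\n");
        return 0;
}

Compiled with a plain C compiler (e.g. gcc sketch.c && ./a.out), the assertions pass, confirming that the refactored caller takes the same __set_spte() vs __xchg_spte() path as before.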
