Skip to content

Commit

Permalink
KVM: MMU: Atomically check for accessed bit when dropping an spte
Browse files Browse the repository at this point in the history
Currently, in the window between the check for the accessed bit, and actually
dropping the spte, a vcpu can access the page through the spte and set the bit,
which will be ignored by the mmu.

Fix by using an exchange operation to atomically fetch the spte and drop it.

Signed-off-by: Avi Kivity <avi@redhat.com>
  • Loading branch information
Avi Kivity committed Aug 2, 2010
1 parent ce06186 commit a9221dd
Showing 1 changed file with 21 additions and 7 deletions.
28 changes: 21 additions & 7 deletions arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,21 @@ static void __set_spte(u64 *sptep, u64 spte)
#endif
}

/*
 * Atomically replace *sptep with new_spte and return the previous value.
 *
 * On 64-bit hosts a single xchg suffices. On 32-bit hosts there is no
 * plain 64-bit exchange, so emulate it with a cmpxchg64 retry loop:
 * snapshot the spte and attempt the swap until no other writer has
 * changed it in between.
 */
static u64 __xchg_spte(u64 *sptep, u64 new_spte)
{
#ifdef CONFIG_X86_64
	return xchg(sptep, new_spte);
#else
	u64 prev;

	for (;;) {
		prev = *sptep;
		if (cmpxchg64(sptep, prev, new_spte) == prev)
			break;
	}

	return prev;
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
struct kmem_cache *base_cache, int min)
{
Expand Down Expand Up @@ -653,18 +668,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
/*
 * Replace *sptep with new_spte and tear down the old mapping.
 *
 * The old spte is fetched with an atomic exchange so that an accessed or
 * writable bit set by a vcpu between "read the spte" and "clear the spte"
 * cannot be lost: all subsequent checks are made against the value that
 * was actually present when the spte was dropped.
 *
 * If the old spte was not an rmapped (present, tracked) mapping there is
 * nothing further to do. Otherwise propagate the accessed/dirty state to
 * the backing page before unlinking the spte from the rmap chain.
 */
static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	pfn_t pfn;
	u64 old_spte;

	/* Atomically fetch-and-replace so later bit checks use the real old value. */
	old_spte = __xchg_spte(sptep, new_spte);
	if (!is_rmap_spte(old_spte))
		return;
	pfn = spte_to_pfn(old_spte);
	if (old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (is_writable_pte(old_spte))
		kvm_set_pfn_dirty(pfn);
	rmap_remove(kvm, sptep);
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
Expand Down

0 comments on commit a9221dd

Please sign in to comment.