KVM: MMU: Move gpte_access() out of paging_tmpl.h
We no longer rely on paging_tmpl.h defines, so we can move the function
to mmu.c.

Rely on zero extension to 64 bits to get the correct NX behaviour.

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Avi Kivity committed Sep 20, 2012
commit 3d34ade (1 parent: edc2ae8)
Showing 2 changed files with 15 additions and 16 deletions.
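
Why zero extension suffices: a 32-bit non-PAE gpte has no NX bit, so when it is passed as a plain u64 it is zero-extended and bit 63 arrives clear, making the access &= ~(gpte >> PT64_NX_SHIFT) step a no-op; only a genuine 64-bit gpte with NX set strips exec permission. A minimal user-space sketch of that behaviour, assuming mask values that mirror the KVM MMU definitions (illustrative assumptions, not the kernel headers):

#include <assert.h>
#include <stdint.h>

#define PT_WRITABLE_MASK (1ULL << 1)    /* assumed: writable bit of a PTE */
#define PT_USER_MASK     (1ULL << 2)    /* assumed: user bit of a PTE */
#define ACC_EXEC_MASK    1u             /* assumed: exec permission, bit 0 */
#define PT64_NX_SHIFT    63             /* NX is bit 63 of a 64-bit PTE */

/* Same body as the new mmu.c helper, minus the unused vcpu argument. */
static unsigned gpte_access(uint64_t gpte)
{
        unsigned access;

        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
        /* NX set: gpte >> 63 == 1, and ~1 clears ACC_EXEC_MASK. */
        access &= ~(gpte >> PT64_NX_SHIFT);

        return access;
}

int main(void)
{
        uint32_t gpte32 = 0x7;                  /* present|write|user; no NX bit exists */
        uint64_t gpte64 = (1ULL << 63) | 0x7;   /* same bits plus NX */

        /* Zero extension leaves bit 63 clear, so exec stays permitted. */
        assert(gpte_access(gpte32) & ACC_EXEC_MASK);

        /* A real 64-bit PTE with NX set loses exec permission. */
        assert(!(gpte_access(gpte64) & ACC_EXEC_MASK));

        return 0;
}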
arch/x86/kvm/mmu.c (10 additions, 0 deletions)
@@ -3437,6 +3437,16 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
 	return false;
 }
 
+static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
+{
+	unsigned access;
+
+	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+	access &= ~(gpte >> PT64_NX_SHIFT);
+
+	return access;
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
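
For context on the paging_tmpl.h defines the commit message refers to: mmu.c includes paging_tmpl.h twice, once with PTTYPE 64 (as above) and once with PTTYPE 32, and the header's FNAME() macro prefixes every function name with the paging mode, stamping out one guest-walker flavour per inclusion. A toy two-file sketch of the scheme, simplified for illustration (the real header defines many more names):

/* tmpl.h -- toy version of the paging_tmpl.h template trick */
#if PTTYPE == 64
        #define pt_element_t unsigned long long
        #define FNAME(name) paging##64_##name
#elif PTTYPE == 32
        #define pt_element_t unsigned int
        #define FNAME(name) paging##32_##name
#else
        #error Invalid PTTYPE value
#endif

/* Each inclusion emits one copy of every FNAME() function. */
static pt_element_t FNAME(read_pte)(pt_element_t *ptep)
{
        return *ptep;
}

#undef pt_element_t
#undef FNAME

/* user.c -- instantiate both flavours, the way mmu.c does */
#define PTTYPE 64
#include "tmpl.h"       /* emits paging64_read_pte() */
#undef PTTYPE

#define PTTYPE 32
#include "tmpl.h"       /* emits paging32_read_pte() */
#undef PTTYPE

Under that scheme the old FNAME(gpte_access) compiled into both paging64_gpte_access() and paging32_gpte_access(); once zero extension removes the need for the #if PTTYPE == 64 guard and for the pt_element_t parameter type, a single u64-based copy in mmu.c can serve both walkers.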
arch/x86/kvm/paging_tmpl.h (5 additions, 16 deletions)
@@ -101,17 +101,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
-{
-	unsigned access;
-
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-#if PTTYPE == 64
-	access &= ~(gpte >> PT64_NX_SHIFT);
-#endif
-	return access;
-}
-
 static bool FNAME(is_last_gpte)(struct guest_walker *walker,
 				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				pt_element_t gpte)
@@ -217,7 +206,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 
 		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
 		if (last_gpte) {
-			pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
+			pte_access = pt_access & gpte_access(vcpu, pte);
 			/* check if the kernel is fetching from user page */
 			if (unlikely(pte_access & PT_USER_MASK) &&
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
@@ -268,7 +257,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 			break;
 		}
 
-		pt_access &= FNAME(gpte_access)(vcpu, pte);
+		pt_access &= gpte_access(vcpu, pte);
 		--walker->level;
 	}
 
@@ -364,7 +353,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
-	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+	pte_access = sp->role.access & gpte_access(vcpu, gpte);
 	protect_clean_gpte(&pte_access, gpte);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
 	if (mmu_invalid_pfn(pfn))
@@ -438,7 +427,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		pte_access = sp->role.access & gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
@@ -791,7 +780,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		gfn = gpte_to_gfn(gpte);
 		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(vcpu, gpte);
+		pte_access &= gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 
 		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
