Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 329916
b: refs/heads/master
c: 8ea667f
h: refs/heads/master
v: v3
  • Loading branch information
Avi Kivity committed Sep 20, 2012
1 parent 18b2734 commit 39396d3
Show file tree
Hide file tree
Showing 4 changed files with 27 additions and 14 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 879238fecc051d95037ae76332916209a7770709
refs/heads/master: 8ea667f259e3767fd3ee85a885c14e417835695e
12 changes: 12 additions & 0 deletions trunk/arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -3408,6 +3408,18 @@ static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
}

/*
 * Strip write permission from @access unless the guest pte's dirty bit
 * is set, so that the first write to a clean page faults and gives us
 * a chance to set the dirty bit.
 */
static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
{
	/* Start from "everything but write is allowed". */
	unsigned allowed = (unsigned)~ACC_WRITE_MASK;

	/* The shift trick below relies on the two write bits coinciding. */
	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	/*
	 * Move the gpte's dirty bit down into the writable bit position:
	 * a dirty gpte re-enables write access, a clean one does not.
	 */
	allowed |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;

	*access &= allowed;
}

static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
int *nr_present)
{
Expand Down
3 changes: 2 additions & 1 deletion trunk/arch/x86/kvm/mmu.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
Expand Down
24 changes: 12 additions & 12 deletions trunk/arch/x86/kvm/paging_tmpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,14 +101,11 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return (ret != orig_pte);
}

static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte,
bool last)
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
unsigned access;

access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
if (last && !is_dirty_gpte(gpte))
access &= ~ACC_WRITE_MASK;

#if PTTYPE == 64
if (vcpu->arch.mmu.nx)
Expand Down Expand Up @@ -222,8 +219,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,

last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
if (last_gpte) {
pte_access = pt_access &
FNAME(gpte_access)(vcpu, pte, true);
pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
/* check if the kernel is fetching from user page */
if (unlikely(pte_access & PT_USER_MASK) &&
kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
Expand Down Expand Up @@ -274,7 +270,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
break;
}

pt_access &= FNAME(gpte_access)(vcpu, pte, false);
pt_access &= FNAME(gpte_access)(vcpu, pte);
--walker->level;
}

Expand All @@ -283,7 +279,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
goto error;
}

if (write_fault && unlikely(!is_dirty_gpte(pte))) {
if (!write_fault)
protect_clean_gpte(&pte_access, pte);
else if (unlikely(!is_dirty_gpte(pte))) {
int ret;

trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
Expand Down Expand Up @@ -368,7 +366,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return;

pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
protect_clean_gpte(&pte_access, gpte);
pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
if (mmu_invalid_pfn(pfn))
return;
Expand Down Expand Up @@ -441,8 +440,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
continue;

pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
true);
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
protect_clean_gpte(&pte_access, gpte);
gfn = gpte_to_gfn(gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
pte_access & ACC_WRITE_MASK);
Expand Down Expand Up @@ -794,7 +793,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)

gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access;
pte_access &= FNAME(gpte_access)(vcpu, gpte, true);
pte_access &= FNAME(gpte_access)(vcpu, gpte);
protect_clean_gpte(&pte_access, gpte);

if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
continue;
Expand Down

0 comments on commit 39396d3

Please sign in to comment.