KVM: MMU: cleanup FNAME(invlpg)
Directly use mmu_page_zap_pte to zap the spte in FNAME(invlpg), and remove
the code duplicated between FNAME(invlpg) and FNAME(sync_page).

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Xiao Guangrong authored and Avi Kivity committed Dec 27, 2011
1 parent d01f8d5 commit 505aef8
Showing 2 changed files with 27 additions and 33 deletions.
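
Editorial aside, not part of the commit: the cleanup works by giving
mmu_page_zap_pte a return value that reports whether a present spte was
zapped, so the caller decides when to flush remote TLBs. The resulting
caller pattern, taken from the FNAME(invlpg) hunk below:

	/* true means a present spte was dropped; stale translations may
	 * remain in remote TLBs, so the caller flushes. */
	if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
		kvm_flush_remote_tlbs(vcpu->kvm);

This replaces the open-coded zap logic and need_flush bookkeeping that
FNAME(invlpg) previously carried.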
arch/x86/kvm/mmu.c (16 changes: 10 additions & 6 deletions)
@@ -1809,25 +1809,29 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 }
 
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			     u64 *spte)
 {
 	u64 pte;
 	struct kvm_mmu_page *child;
 
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
-		if (is_last_spte(pte, sp->role.level))
+		if (is_last_spte(pte, sp->role.level)) {
 			drop_spte(kvm, spte);
-		else {
+			if (is_large_pte(pte))
+				--kvm->stat.lpages;
+		} else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			drop_parent_pte(child, spte);
 		}
-	} else if (is_mmio_spte(pte))
+		return true;
+	}
+
+	if (is_mmio_spte(pte))
 		mmu_spte_clear_no_track(spte);
 
-	if (is_large_pte(pte))
-		--kvm->stat.lpages;
+	return false;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
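For context, kvm_mmu_page_unlink_children, whose signature opens the
trailing context line above, is the other main caller of mmu_page_zap_pte:
it zaps every slot of a shadow page. A sketch of that function as it stood
in this era (reconstructed, not part of the diff; it can ignore the new
return value because the page-zapping path does its own TLB flushing):

	static void kvm_mmu_page_unlink_children(struct kvm *kvm,
						 struct kvm_mmu_page *sp)
	{
		unsigned i;

		/* Zap all 512 sptes of this shadow page. */
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			mmu_page_zap_pte(kvm, sp, sp->spt + i);
	}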
arch/x86/kvm/paging_tmpl.h (44 changes: 17 additions & 27 deletions)
@@ -656,14 +656,25 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	return 0;
 }
 
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+	int offset = 0;
+
+	WARN_ON(sp->role.level != 1);
+
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
-	int need_flush = 0;
 
 	vcpu_clear_mmio_info(vcpu, gva);
 
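Worked example for FNAME(get_level1_sp_gpa) above (editorial aside, not
part of the diff): when PTTYPE == 32, pt_element_t is 4 bytes, so a guest
page table holds 1024 entries per page while a shadow page holds only 512
sptes; each guest table is therefore shadowed by two pages distinguished by
sp->role.quadrant. With PT64_LEVEL_BITS = 9, quadrant 1 gives
offset = 1 << 9 = 512 entries, and the returned gpa is
gfn_to_gpa(sp->gfn) + 512 * 4 = gfn_to_gpa(sp->gfn) + 2048, i.e. the second
half of the guest page. For PTTYPE == 64, offset stays 0 and the result is
simply gfn_to_gpa(sp->gfn).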
@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)

 		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			int offset, shift;
-
 			if (!sp->unsync)
 				break;
 
-			shift = PAGE_SHIFT -
-				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-			offset = sp->role.quadrant << shift;
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-			if (is_shadow_present_pte(*sptep)) {
-				if (is_large_pte(*sptep))
-					--vcpu->kvm->stat.lpages;
-				drop_spte(vcpu->kvm, sptep);
-				need_flush = 1;
-			} else if (is_mmio_spte(*sptep))
-				mmu_spte_clear_no_track(sptep);
-
-			break;
+			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+				kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
-	if (need_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
 	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
 
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	int i, offset, nr_present;
+	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 
-	offset = nr_present = 0;
-
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
 
-	if (PTTYPE == 32)
-		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		unsigned pte_access;
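For context, a sketch of the unchanged FNAME(sync_page) loop body that
consumes first_pte_gpa (reconstructed from the code of this era, not part
of the diff):

	pt_element_t gpte;
	gpa_t pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

	/* Re-read the guest pte that this spte shadows. */
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return -EINVAL;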
