KVM: X86: Change kvm_sync_page() to return true when remote flush is needed

Currently kvm_sync_page() returns true when there is any present spte, but the return value is ignored by its callers.

Change kvm_sync_page() to return true when a remote flush is needed and change mmu->sync_page() not to flush directly, so that callers can combine and reduce remote flush requests.
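For illustration, a minimal standalone sketch of the new convention, using simplified stand-in types and hypothetical helper names (not the kernel code itself): sync_page() reports whether a flush is needed instead of flushing, kvm_sync_page() turns that into a bool, and the caller ORs the results so one combined flush can be issued at the end.

/*
 * Minimal sketch, not kernel code: simplified types and hypothetical
 * names, illustrating the new return convention and how callers defer
 * and combine TLB flushes instead of flushing inside sync_page().
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * sync_page() convention after this commit:
 *   < 0 : page is unsyncable and should be zapped
 *     0 : synced, no TLB flush needed
 *   > 0 : synced, remote TLB flush needed
 */
static int sync_page(int page_state)              /* stand-in for mmu->sync_page() */
{
	return page_state;
}

/* stand-in for kvm_sync_page(): true means "remote flush needed" */
static bool kvm_sync_page(int page_state, bool *zapped)
{
	int ret = sync_page(page_state);

	if (ret < 0) {
		*zapped = true;                    /* models kvm_mmu_prepare_zap_page() */
		return false;
	}
	return ret != 0;                           /* !!ret */
}

int main(void)
{
	int pages[] = { 0, 2, -1, 1 };             /* fake per-page sync results */
	bool flush = false, zapped = false;

	/* Like mmu_sync_children(): OR the results, flush once at the end. */
	for (unsigned int i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		flush |= kvm_sync_page(pages[i], &zapped);

	if (flush || zapped)
		printf("one combined remote flush/zap instead of per-page flushes\n");
	return 0;
}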

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210918005636.3675-7-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Lai Jiangshan authored and Paolo Bonzini committed Sep 30, 2021
1 parent 06152b2 commit c3e5e41
Showing 2 changed files with 23 additions and 19 deletions.
21 changes: 13 additions & 8 deletions arch/x86/kvm/mmu/mmu.c
@@ -1795,7 +1795,7 @@ static void mark_unsync(u64 *spte)
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 			       struct kvm_mmu_page *sp)
 {
-	return 0;
+	return -1;
 }
 
 #define KVM_PAGE_ARRAY_NR 16
@@ -1909,12 +1909,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			  struct list_head *invalid_list)
 {
-	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
+
+	if (ret < 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return false;
 	}
 
-	return true;
+	return !!ret;
 }
 
 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
@@ -2024,6 +2026,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 	struct mmu_page_path parents;
 	struct kvm_mmu_pages pages;
 	LIST_HEAD(invalid_list);
+	bool flush = false;
 
 	while (mmu_unsync_walk(parent, &pages)) {
 		bool protected = false;
@@ -2033,25 +2036,27 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 
 		if (protected) {
 			kvm_flush_remote_tlbs(vcpu->kvm);
+			flush = false;
 		}
 
 		for_each_sp(pages, sp, parents, i) {
 			kvm_unlink_unsync_page(vcpu->kvm, sp);
-			kvm_sync_page(vcpu, sp, &invalid_list);
+			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
 			mmu_pages_clear_parents(&parents);
 		}
 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
+			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
 			if (!can_yield) {
 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
 				return -EINTR;
 			}
 
 			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
+			flush = false;
 		}
 	}
 
-	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
+	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
 	return 0;
 }

@@ -2135,6 +2140,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				break;
 
 			WARN_ON(!list_empty(&invalid_list));
+			kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		__clear_sp_write_flooding_count(sp);
@@ -4191,15 +4197,14 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 }
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			   unsigned int access, int *nr_present)
+			   unsigned int access)
 {
 	if (unlikely(is_mmio_spte(*sptep))) {
 		if (gfn != get_mmio_spte_gfn(*sptep)) {
 			mmu_spte_clear_no_track(sptep);
 			return true;
 		}
 
-		(*nr_present)++;
 		mark_mmio_spte(vcpu, sptep, gfn, access);
 		return true;
 	}
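For context, the accumulated flush value feeds the pre-existing kvm_mmu_remote_flush_or_zap() helper. Below is a standalone sketch of that decision logic with simplified stand-in types and hypothetical names, not the kernel code itself: committing a zap already flushes remote TLBs, so an explicit flush is only issued when nothing is queued for zapping.

/*
 * Standalone sketch (not kernel code) of the decision made by
 * kvm_mmu_remote_flush_or_zap(), with simplified stand-in types.
 */
#include <stdbool.h>
#include <stdio.h>

static void commit_zap(int nr_zapped)      /* stand-in for kvm_mmu_commit_zap_page() */
{
	printf("zap %d pages (includes remote TLB flush)\n", nr_zapped);
}

static void flush_remote_tlbs(void)        /* stand-in for kvm_flush_remote_tlbs() */
{
	printf("remote TLB flush\n");
}

static bool remote_flush_or_zap(int nr_zapped, bool remote_flush)
{
	if (!remote_flush && nr_zapped == 0)
		return false;              /* nothing to do */
	if (nr_zapped)
		commit_zap(nr_zapped);     /* zapping covers the flush */
	else
		flush_remote_tlbs();
	return true;
}

int main(void)
{
	remote_flush_or_zap(0, false);     /* no-op */
	remote_flush_or_zap(0, true);      /* flush only */
	remote_flush_or_zap(3, true);      /* zap, which implies the flush */
	return 0;
}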
21 changes: 10 additions & 11 deletions arch/x86/kvm/mmu/paging_tmpl.h
@@ -1066,11 +1066,16 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
+ *
+ * Returns
+ * < 0: the sp should be zapped
+ *   0: the sp is synced and no tlb flushing is required
+ * > 0: the sp is synced and tlb flushing is required
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
-	int i, nr_present = 0;
+	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 	int set_spte_ret = 0;
@@ -1098,7 +1103,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	 */
 	if (WARN_ON_ONCE(sp->role.direct ||
 			 (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
-		return 0;
+		return -1;
 
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
@@ -1115,7 +1120,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 					       sizeof(pt_element_t)))
-			return 0;
+			return -1;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
 			set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
@@ -1127,8 +1132,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		pte_access &= FNAME(gpte_access)(gpte);
 		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
-				   &nr_present))
+		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
 			continue;
 
 		if (gfn != sp->gfns[i]) {
@@ -1137,8 +1141,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			continue;
 		}
 
-		nr_present++;
-
 		host_writable = sp->spt[i] & shadow_host_writable_mask;
 
 		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
@@ -1147,10 +1149,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 					 true, false, host_writable);
 	}
 
-	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
-	return nr_present;
+	return set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH;
 }
 
 #undef pt_element_t
