KVM: X86: Don't unload MMU in kvm_vcpu_flush_tlb_guest()
kvm_mmu_unload() destroys all the PGD caches.  Use the lighter
kvm_mmu_sync_roots() and kvm_mmu_sync_prev_roots() instead.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211019110154.4091-5-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Lai Jiangshan authored and Paolo Bonzini committed Oct 22, 2021
1 parent 264d3dc commit 61b05a9
Showing 3 changed files with 22 additions and 6 deletions.
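
For context, and not part of the commit itself: the "PGD caches" named in the commit message are KVM's per-MMU cache of previously used roots, which lets a guest CR3 switch reuse an already-built root instead of rebuilding it. A rough excerpt of the relevant definitions, approximately as they appeared in arch/x86/include/asm/kvm_host.h at the time:

#define KVM_MMU_NUM_PREV_ROOTS 3

struct kvm_mmu_root_info {
        gpa_t pgd;      /* guest PGD (CR3 or EPTP) this root was built for */
        hpa_t hpa;      /* host physical address of the root page table */
};

/* Inside struct kvm_mmu: the current root plus the cached previous roots. */
hpa_t root_hpa;
struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

kvm_mmu_unload() invalidates the current root and all of these cached entries, so every later guest PGD switch misses the cache and pays for a full root rebuild.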
1 change: 1 addition & 0 deletions arch/x86/kvm/mmu.h
@@ -79,6 +79,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
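
Why the unload is so heavy, for context: once kvm_mmu_unload() has invalidated the root, the next kvm_mmu_reload() on vcpu entry can no longer take its fast path and must do a full kvm_mmu_load(). Roughly, as the helper declared above stood at the time:

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        /* Fast path: a valid current root means there is nothing to rebuild. */
        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
                return 0;

        /* Slow path: reallocate and sync roots, flushing the TLB. */
        return kvm_mmu_load(vcpu);
}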
16 changes: 16 additions & 0 deletions arch/x86/kvm/mmu/mmu.c
@@ -3647,6 +3647,9 @@ static bool is_unsync_root(hpa_t root)
 {
         struct kvm_mmu_page *sp;
 
+        if (!VALID_PAGE(root))
+                return false;
+
         /*
          * The read barrier orders the CPU's read of SPTE.W during the page table
          * walk before the reads of sp->unsync/sp->unsync_children here.
@@ -3714,6 +3717,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
         write_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
+{
+        unsigned long roots_to_free = 0;
+        int i;
+
+        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+                if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
+                        roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+        /* sync prev_roots by simply freeing them */
+        kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
                                   u32 access, struct x86_exception *exception)
 {
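
A note on the bitmask, for context: each prev_roots slot maps to one bit of the roots_to_free argument, so kvm_mmu_free_roots() frees exactly the stale entries and leaves the rest of the cache intact. Approximately, from arch/x86/include/asm/kvm_host.h of that era:

#define KVM_MMU_ROOT_CURRENT            BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)        BIT(1 + i)

Freeing an unsync previous root is a valid way to "sync" it: when the guest next switches back to that PGD, the cache lookup misses and the root is rebuilt from scratch, which is in sync by construction. The new VALID_PAGE() check in is_unsync_root() exists precisely because unused prev_roots slots hold INVALID_PAGE and are now passed to it directly.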
11 changes: 5 additions & 6 deletions arch/x86/kvm/x86.c
@@ -3245,15 +3245,14 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
         ++vcpu->stat.tlb_flush;
 
         if (!tdp_enabled) {
-               /*
+                /*
                  * A TLB flush on behalf of the guest is equivalent to
                  * INVPCID(all), toggling CR4.PGE, etc., which requires
-                 * a forced sync of the shadow page tables. Unload the
-                 * entire MMU here and the subsequent load will sync the
-                 * shadow page tables, and also flush the TLB.
+                 * a forced sync of the shadow page tables. Ensure all the
+                 * roots are synced and the guest TLB in hardware is clean.
                  */
-                kvm_mmu_unload(vcpu);
-                return;
+                kvm_mmu_sync_roots(vcpu);
+                kvm_mmu_sync_prev_roots(vcpu);
         }
 
         static_call(kvm_x86_tlb_flush_guest)(vcpu);
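
Two things worth noting about this hunk, for context. First, the initial -/+ pair on the comment opener is a whitespace-only indentation fix, which is why this file shows 5 additions and 6 deletions. Second, the old code returned early and relied on the eventual kvm_mmu_load() to flush the TLB; the new code falls through, so static_call(kvm_x86_tlb_flush_guest)() now runs in the !tdp_enabled case as well and the hardware TLB is flushed right here. The call site is unchanged; roughly, from vcpu_enter_guest() in arch/x86/kvm/x86.c at the time:

/* Service a pending guest TLB flush request before entering the guest. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
        kvm_vcpu_flush_tlb_guest(vcpu);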
