KVM: x86/mmu: Zap only TDP MMU leafs in kvm_zap_gfn_range()
Zap only leaf SPTEs in the TDP MMU's zap_gfn_range(), and rename various
functions accordingly.  When removing mappings for functional correctness
(except for the stupid VFIO GPU passthrough memslots bug), zapping the
leaf SPTEs is sufficient as the paging structures themselves do not point
at guest memory and do not directly impact the final translation (in the
TDP MMU).

Note, this aligns the TDP MMU with the legacy/full MMU, which zaps only
the rmaps, a.k.a. leaf SPTEs, in kvm_zap_gfn_range() and
kvm_unmap_gfn_range().

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Message-Id: <20220226001546.360188-18-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson authored and Paolo Bonzini committed Mar 8, 2022
1 parent acbda82 commit cf3e264
Showing 3 changed files with 14 additions and 39 deletions.
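
As background for the diffs below, here is a standalone user-space toy model (not KVM code) of the leaf-only zap described in the commit message: only present, last-level ("leaf") entries inside [start, end) are cleared, while the intermediate entries that make up the paging structure are left in place, since they do not themselves translate guest memory. All identifiers are illustrative, not KVM symbols.

/*
 * Toy model of a leaf-only zap. Build with any C99 compiler; this is a
 * sketch of the idea, not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENTRIES_PER_TABLE 8UL

struct leaf_table {
	unsigned long pfn[ENTRIES_PER_TABLE];		/* 0 == not present */
};

struct root_table {
	struct leaf_table *child[ENTRIES_PER_TABLE];	/* "non-leaf" entries */
};

/* Zap leaf entries for gfns in [start, end); return true if a "flush" is needed. */
static bool toy_zap_leafs(struct root_table *root, unsigned long start,
			  unsigned long end)
{
	bool flush = false;
	unsigned long gfn;

	for (gfn = start; gfn < end; gfn++) {
		struct leaf_table *lt = root->child[gfn / ENTRIES_PER_TABLE];

		if (!lt)				/* structure not present */
			continue;
		if (lt->pfn[gfn % ENTRIES_PER_TABLE]) {
			lt->pfn[gfn % ENTRIES_PER_TABLE] = 0;	/* zap the leaf */
			flush = true;
		}
		/* root->child[] (the paging structure) is deliberately never cleared */
	}
	return flush;
}

int main(void)
{
	static struct leaf_table lt = { .pfn = { 11, 12, 13, 14, 15, 16, 17, 18 } };
	struct root_table root = { .child = { &lt } };

	printf("flush needed: %d\n", toy_zap_leafs(&root, 2, 5));
	printf("pfn[1]=%lu pfn[2]=%lu pfn[5]=%lu, structure intact: %d\n",
	       lt.pfn[1], lt.pfn[2], lt.pfn[5], root.child[0] == &lt);
	return 0;
}
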
arch/x86/kvm/mmu/mmu.c (2 additions, 2 deletions)
@@ -5845,8 +5845,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
 	if (is_tdp_mmu_enabled(kvm)) {
 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
-							  gfn_end, flush);
+			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+						      gfn_end, true, flush);
 	}
 
 	if (flush)
arch/x86/kvm/mmu/tdp_mmu.c (11 additions, 30 deletions)
@@ -834,53 +834,34 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 /*
- * Tears down the mappings for the range of gfns, [start, end), and frees the
- * non-root pages mapping GFNs strictly within that range. Returns true if
- * SPTEs have been cleared and a TLB flush is needed before releasing the
- * MMU lock.
+ * Zap leafs SPTEs for the range of gfns, [start, end). Returns true if SPTEs
+ * have been cleared and a TLB flush is needed before releasing the MMU lock.
  *
  * If can_yield is true, will release the MMU lock and reschedule if the
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
  * operation can cause a soft lockup.
  */
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield, bool flush)
+static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
+			      gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
-	bool zap_all = (start == 0 && end >= tdp_mmu_max_gfn_host());
 	struct tdp_iter iter;
 
-	/*
-	 * No need to try to step down in the iterator when zapping all SPTEs,
-	 * zapping the top-level non-leaf SPTEs will recurse on their children.
-	 */
-	int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
-
 	end = min(end, tdp_mmu_max_gfn_host());
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
 		if (can_yield &&
 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
 			flush = false;
 			continue;
 		}
 
-		if (!is_shadow_present_pte(iter.old_spte))
-			continue;
-
-		/*
-		 * If this is a non-last-level SPTE that covers a larger range
-		 * than should be zapped, continue, and zap the mappings at a
-		 * lower level, except when zapping all SPTEs.
-		 */
-		if (!zap_all &&
-		    (iter.gfn < start ||
-		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
+		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
@@ -898,13 +879,13 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
-				 gfn_t end, bool can_yield, bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
+			   bool can_yield, bool flush)
 {
 	struct kvm_mmu_page *root;
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
-		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
+		flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
 
 	return flush;
 }
@@ -1202,8 +1183,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 				 bool flush)
 {
-	return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start,
-					   range->end, range->may_block, flush);
+	return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
+				     range->end, range->may_block, flush);
 }
 
 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.h (1 addition, 7 deletions)
@@ -15,14 +15,8 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 			  bool shared);
 
-bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
 				 gfn_t end, bool can_yield, bool flush);
-static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
-					     gfn_t start, gfn_t end, bool flush)
-{
-	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
-}
-
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
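
With the static inline kvm_tdp_mmu_zap_gfn_range() wrapper removed, callers that relied on its implicit can_yield=true now call kvm_tdp_mmu_zap_leafs() directly and pass can_yield themselves. A minimal sketch of the resulting call shape, mirroring the kvm_zap_gfn_range() change in the mmu.c hunk above (no new API is introduced here):

	/* per address space, zap only leaf SPTEs; can_yield is explicit now */
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
					      gfn_end, true, flush);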
