kvm: x86/mmu: Support write protection for nesting in tdp MMU
To support nested virtualization, KVM will sometimes need to write
protect pages which are part of a shadowed paging structure or are not
writable in the shadowed paging structure. Add a function to write
protect GFN mappings for this purpose.
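
For context, a minimal sketch of how a shadow-MMU caller consumes this API. The helper name below is hypothetical (the real call sites live in arch/x86/kvm/mmu/mmu.c), and it assumes mmu_lock is a spinlock, as it was at the time of this commit. The return value tells the caller whether an SPTE was changed and a remote TLB flush is therefore needed:

static void example_write_protect_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	/* Write protect the GFN in both the rmap-based and TDP MMUs. */
	if (kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn))
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);
}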

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20201014182700.2888246-18-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Ben Gardon authored and Paolo Bonzini committed Oct 23, 2020
1 parent 1488199 commit 46044f7
Showing 3 changed files with 57 additions and 0 deletions.
4 changes: 4 additions & 0 deletions arch/x86/kvm/mmu/mmu.c
@@ -1299,6 +1299,10 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	if (kvm->arch.tdp_mmu_enabled)
		write_protected |=
			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);

	return write_protected;
}

50 changes: 50 additions & 0 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -1078,3 +1078,53 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
		kvm_mmu_put_root(kvm, root);
	}
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
		if (!is_writable_pte(iter.old_spte))
			break;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	lockdep_assert_held(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= write_protect_gfn(kvm, root, gfn);
	}
	return spte_set;
}
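
To make the masking in write_protect_gfn() concrete, here is a standalone illustration, not kernel code: the bit positions below are assumptions for demonstration only (the real masks come from KVM's SPTE definitions). Clearing PT_WRITABLE_MASK revokes hardware write access; clearing SPTE_MMU_WRITEABLE additionally stops KVM's fast page-fault path from locklessly restoring it, so future writes keep faulting to the host:

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit positions; the real values live in KVM's headers. */
#define PT_WRITABLE_MASK	(1ull << 1)	/* hardware writable bit */
#define SPTE_MMU_WRITEABLE	(1ull << 53)	/* KVM "may be made writable" software bit */

int main(void)
{
	uint64_t old_spte = (0x12345ull << 12) | SPTE_MMU_WRITEABLE |
			    PT_WRITABLE_MASK | 0x1;	/* present, writable leaf */
	uint64_t new_spte = old_spte & ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

	printf("old spte: %#llx\n", (unsigned long long)old_spte);
	printf("new spte: %#llx\n", (unsigned long long)new_spte);
	return 0;
}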

3 changes: 3 additions & 0 deletions arch/x86/kvm/mmu/tdp_mmu.h
@@ -40,4 +40,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);
#endif /* __KVM_X86_MMU_TDP_MMU_H */
