KVM: x86/mmu: Skip rmap operations if rmaps not allocated
If only the TDP MMU is being used to manage the memory mappings for a VM,
then many rmap operations can be skipped as they are guaranteed to be
no-ops. This saves some time which would otherwise be spent on the rmap
operations.
It also avoids acquiring the MMU lock in write mode for many operations.

This makes it safe to run the VM without rmaps allocated when only using
the TDP MMU, and sets the stage for waiting to allocate the rmaps until
they're needed.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210518173414.450044-7-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Ben Gardon authored and Paolo Bonzini committed Jun 17, 2021
1 parent a255740 commit e220971
Showing 3 changed files with 72 additions and 48 deletions.
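
As context for the diff: each notifier-driven path below guards its legacy rmap walk behind the new kvm_memslots_have_rmaps() helper and leaves the TDP MMU handling untouched. A minimal sketch of the resulting shape of one such handler, condensed from the kvm_unmap_gfn_range() hunk below (the trailing return is implied by the hunk rather than shown in it):

/* Sketch of a guarded notifier path after this change (condensed, not the literal diff). */
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = false;

	/* Legacy rmap walk: a guaranteed no-op when rmaps were never allocated. */
	if (kvm_memslots_have_rmaps(kvm))
		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);

	/* The TDP MMU does not use rmaps, so its handling is unchanged. */
	if (is_tdp_mmu_enabled(kvm))
		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);

	return flush;
}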
5 changes: 5 additions & 0 deletions arch/x86/kvm/mmu.h
@@ -232,4 +232,9 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
 
+static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+{
+	return kvm->arch.memslots_have_rmaps;
+}
+
 #endif
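
The helper above also lets the slot-wide operations in mmu.c take mmu_lock in write mode only when there is rmap work to do, while the TDP MMU side keeps running under the lock held for read. A rough sketch of that shape, based on the kvm_mmu_slot_leaf_clear_dirty() hunk further below; the TDP MMU body and the final TLB flush are truncated in that hunk, so they appear here only as comments:

/* Rough sketch of the write-lock scoping applied to slot-wide operations. */
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot)
{
	bool flush = false;

	/* Only the legacy rmap walk needs mmu_lock held for write. */
	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		/* TDP MMU dirty clearing runs here (body truncated in the hunk below). */
		read_unlock(&kvm->mmu_lock);
	}

	/* Any TLB flush based on 'flush' follows, as before (not shown in the hunk). */
}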
113 changes: 66 additions & 47 deletions arch/x86/kvm/mmu/mmu.c
@@ -1183,6 +1183,10 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
 				slot->base_gfn + gfn_offset, mask, true);
+
+	if (!kvm_memslots_have_rmaps(kvm))
+		return;
+
 	while (mask) {
 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 					  PG_LEVEL_4K, slot);
@@ -1212,6 +1216,10 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 	if (is_tdp_mmu_enabled(kvm))
 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
 				slot->base_gfn + gfn_offset, mask, false);
+
+	if (!kvm_memslots_have_rmaps(kvm))
+		return;
+
 	while (mask) {
 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 					  PG_LEVEL_4K, slot);
@@ -1278,9 +1286,11 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	int i;
 	bool write_protected = false;
 
-	for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
-		rmap_head = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+	if (kvm_memslots_have_rmaps(kvm)) {
+		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+			rmap_head = __gfn_to_rmap(gfn, i, slot);
+			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+		}
 	}
 
 	if (is_tdp_mmu_enabled(kvm))
@@ -1451,9 +1461,10 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
 
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	bool flush;
+	bool flush = false;
 
-	flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
+	if (kvm_memslots_have_rmaps(kvm))
+		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
 
 	if (is_tdp_mmu_enabled(kvm))
 		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
@@ -1463,9 +1474,10 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	bool flush;
+	bool flush = false;
 
-	flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
+	if (kvm_memslots_have_rmaps(kvm))
+		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
 
 	if (is_tdp_mmu_enabled(kvm))
 		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
@@ -1518,9 +1530,10 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	bool young;
+	bool young = false;
 
-	young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
+	if (kvm_memslots_have_rmaps(kvm))
+		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
 
 	if (is_tdp_mmu_enabled(kvm))
 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
@@ -1530,9 +1543,10 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	bool young;
+	bool young = false;
 
-	young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
+	if (kvm_memslots_have_rmaps(kvm))
+		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
 
 	if (is_tdp_mmu_enabled(kvm))
 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
@@ -5534,29 +5548,29 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	int i;
 	bool flush = false;
 
-	write_lock(&kvm->mmu_lock);
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		slots = __kvm_memslots(kvm, i);
-		kvm_for_each_memslot(memslot, slots) {
-			gfn_t start, end;
-
-			start = max(gfn_start, memslot->base_gfn);
-			end = min(gfn_end, memslot->base_gfn + memslot->npages);
-			if (start >= end)
-				continue;
+	if (kvm_memslots_have_rmaps(kvm)) {
+		write_lock(&kvm->mmu_lock);
+		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+			slots = __kvm_memslots(kvm, i);
+			kvm_for_each_memslot(memslot, slots) {
+				gfn_t start, end;
+
+				start = max(gfn_start, memslot->base_gfn);
+				end = min(gfn_end, memslot->base_gfn + memslot->npages);
+				if (start >= end)
+					continue;
 
-			flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-							PG_LEVEL_4K,
-							KVM_MAX_HUGEPAGE_LEVEL,
-							start, end - 1, true, flush);
+				flush = slot_handle_level_range(kvm, memslot,
+						kvm_zap_rmapp, PG_LEVEL_4K,
+						KVM_MAX_HUGEPAGE_LEVEL, start,
+						end - 1, true, flush);
+			}
 		}
+		if (flush)
+			kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+		write_unlock(&kvm->mmu_lock);
 	}
 
-	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-
-	write_unlock(&kvm->mmu_lock);
-
 	if (is_tdp_mmu_enabled(kvm)) {
 		flush = false;
 
@@ -5583,12 +5597,15 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 				      struct kvm_memory_slot *memslot,
 				      int start_level)
 {
-	bool flush;
+	bool flush = false;
 
-	write_lock(&kvm->mmu_lock);
-	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
-				  start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
-	write_unlock(&kvm->mmu_lock);
+	if (kvm_memslots_have_rmaps(kvm)) {
+		write_lock(&kvm->mmu_lock);
+		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
+					  false);
+		write_unlock(&kvm->mmu_lock);
+	}
 
 	if (is_tdp_mmu_enabled(kvm)) {
 		read_lock(&kvm->mmu_lock);
@@ -5658,16 +5675,15 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
 	bool flush;
 
-	write_lock(&kvm->mmu_lock);
-	flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-
-	if (flush)
-		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
-	write_unlock(&kvm->mmu_lock);
+	if (kvm_memslots_have_rmaps(kvm)) {
+		write_lock(&kvm->mmu_lock);
+		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+		if (flush)
+			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+		write_unlock(&kvm->mmu_lock);
+	}
 
 	if (is_tdp_mmu_enabled(kvm)) {
-		flush = false;
-
 		read_lock(&kvm->mmu_lock);
 		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
 		if (flush)
@@ -5694,11 +5710,14 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot)
 {
-	bool flush;
+	bool flush = false;
 
-	write_lock(&kvm->mmu_lock);
-	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
-	write_unlock(&kvm->mmu_lock);
+	if (kvm_memslots_have_rmaps(kvm)) {
+		write_lock(&kvm->mmu_lock);
+		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
+					 false);
+		write_unlock(&kvm->mmu_lock);
+	}
 
 	if (is_tdp_mmu_enabled(kvm)) {
 		read_lock(&kvm->mmu_lock);
2 changes: 1 addition & 1 deletion arch/x86/kvm/x86.c
@@ -10975,7 +10975,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
 	 */
 	memset(&slot->arch, 0, sizeof(slot->arch));
 
-	if (kvm->arch.memslots_have_rmaps) {
+	if (kvm_memslots_have_rmaps(kvm)) {
 		r = memslot_rmap_alloc(slot, npages);
 		if (r)
 			return r;
