KVM: arm64: Use read/write spin lock for MMU protection

Replace the MMU spinlock with a rwlock, and convert all existing
acquisitions of the lock into write-lock acquisitions. A future
commit will add a fast path for permission relaxation during dirty
logging under a read lock.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220118015703.3630552-2-jingzhangos@google.com
Jing Zhang authored and Marc Zyngier committed Feb 8, 2022
1 parent dfd42fa commit fcc5bf8
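
The patch follows the kernel's standard spinlock-to-rwlock conversion
recipe: every existing critical section becomes a writer, which keeps
the old mutual-exclusion semantics intact, and read-side users can then
be added one by one. A minimal sketch of that pattern; the demo_* names
are illustrative and not part of this patch:

    #include <linux/spinlock.h>	/* also provides the rwlock_t API */

    /* Stand-in for kvm->mmu_lock after the conversion. */
    static DEFINE_RWLOCK(demo_mmu_lock);

    /* Anything that changes the stage-2 page tables serializes on the
     * write side, mirroring the write_lock() calls in the diff below. */
    static void demo_mutate_stage2(void)
    {
            write_lock(&demo_mmu_lock);
            /* ... unmap / remap / restructure page tables ... */
            write_unlock(&demo_mmu_lock);
    }

    /* A later fast path can run under the read side, letting several
     * vCPUs relax permissions concurrently during dirty logging. */
    static void demo_relax_permissions(void)
    {
            read_lock(&demo_mmu_lock);
            /* ... flip access bits on an existing mapping ... */
            read_unlock(&demo_mmu_lock);
    }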
Showing 2 changed files with 20 additions and 18 deletions.
arch/arm64/include/asm/kvm_host.h (2 additions, 0 deletions)
@@ -50,6 +50,8 @@
 #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				    KVM_DIRTY_LOG_INITIALLY_SET)
 
+#define KVM_HAVE_MMU_RWLOCK
+
 /*
  * Mode of operation configurable with kvm-arm.mode early param.
  * See Documentation/admin-guide/kernel-parameters.txt for more information.
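
For reference, KVM_HAVE_MMU_RWLOCK is the opt-in switch for generic KVM
code: it changes the type of kvm->mmu_lock and routes the generic
KVM_MMU_LOCK() helpers to the write side. Roughly, paraphrasing
include/linux/kvm_host.h and virt/kvm/kvm_main.c rather than quoting
this diff:

    #ifdef KVM_HAVE_MMU_RWLOCK
            rwlock_t   mmu_lock;	/* in struct kvm */
    #else
            spinlock_t mmu_lock;
    #endif

    #ifdef KVM_HAVE_MMU_RWLOCK
    #define KVM_MMU_LOCK(kvm)	write_lock(&(kvm)->mmu_lock)
    #define KVM_MMU_UNLOCK(kvm)	write_unlock(&(kvm)->mmu_lock)
    #else
    #define KVM_MMU_LOCK(kvm)	spin_lock(&(kvm)->mmu_lock)
    #define KVM_MMU_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
    #endif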
arch/arm64/kvm/mmu.c (18 additions, 18 deletions)
Expand Up @@ -58,7 +58,7 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
break;

if (resched && next != end)
cond_resched_lock(&kvm->mmu_lock);
cond_resched_rwlock_write(&kvm->mmu_lock);
} while (addr = next, addr != end);

return ret;
@@ -179,7 +179,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
 	phys_addr_t end = start + size;
 
-	assert_spin_locked(&kvm->mmu_lock);
+	lockdep_assert_held_write(&kvm->mmu_lock);
 	WARN_ON(size & ~PAGE_MASK);
 	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
 				   may_block));
@@ -213,13 +213,13 @@ static void stage2_flush_vm(struct kvm *kvm)
 	int idx, bkt;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	spin_lock(&kvm->mmu_lock);
+	write_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, bkt, slots)
 		stage2_flush_memslot(kvm, memslot);
 
-	spin_unlock(&kvm->mmu_lock);
+	write_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 }

@@ -720,13 +720,13 @@ void stage2_unmap_vm(struct kvm *kvm)
 
 	idx = srcu_read_lock(&kvm->srcu);
 	mmap_read_lock(current->mm);
-	spin_lock(&kvm->mmu_lock);
+	write_lock(&kvm->mmu_lock);
 
 	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, bkt, slots)
 		stage2_unmap_memslot(kvm, memslot);
 
-	spin_unlock(&kvm->mmu_lock);
+	write_unlock(&kvm->mmu_lock);
 	mmap_read_unlock(current->mm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
@@ -736,14 +736,14 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
 	struct kvm_pgtable *pgt = NULL;
 
-	spin_lock(&kvm->mmu_lock);
+	write_lock(&kvm->mmu_lock);
 	pgt = mmu->pgt;
 	if (pgt) {
 		mmu->pgd_phys = 0;
 		mmu->pgt = NULL;
 		free_percpu(mmu->last_vcpu_ran);
 	}
-	spin_unlock(&kvm->mmu_lock);
+	write_unlock(&kvm->mmu_lock);
 
 	if (pgt) {
 		kvm_pgtable_stage2_destroy(pgt);
@@ -783,10 +783,10 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 		if (ret)
 			break;
 
-		spin_lock(&kvm->mmu_lock);
+		write_lock(&kvm->mmu_lock);
 		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
 					     &cache);
-		spin_unlock(&kvm->mmu_lock);
+		write_unlock(&kvm->mmu_lock);
 		if (ret)
 			break;

@@ -834,9 +834,9 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	start = memslot->base_gfn << PAGE_SHIFT;
 	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 
-	spin_lock(&kvm->mmu_lock);
+	write_lock(&kvm->mmu_lock);
 	stage2_wp_range(&kvm->arch.mmu, start, end);
-	spin_unlock(&kvm->mmu_lock);
+	write_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 }

@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (exec_fault && device)
 		return -ENOEXEC;
 
-	spin_lock(&kvm->mmu_lock);
+	write_lock(&kvm->mmu_lock);
 	pgt = vcpu->arch.hw_mmu->pgt;
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
@@ -1271,7 +1271,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 out_unlock:
-	spin_unlock(&kvm->mmu_lock);
+	write_unlock(&kvm->mmu_lock);
 	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 	return ret != -EAGAIN ? ret : 0;
@@ -1286,10 +1286,10 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 
 	trace_kvm_access_fault(fault_ipa);
 
-	spin_lock(&vcpu->kvm->mmu_lock);
+	write_lock(&vcpu->kvm->mmu_lock);
 	mmu = vcpu->arch.hw_mmu;
 	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
-	spin_unlock(&vcpu->kvm->mmu_lock);
+	write_unlock(&vcpu->kvm->mmu_lock);
 
 	pte = __pte(kpte);
 	if (pte_valid(pte))
@@ -1692,9 +1692,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
 	phys_addr_t size = slot->npages << PAGE_SHIFT;
 
-	spin_lock(&kvm->mmu_lock);
+	write_lock(&kvm->mmu_lock);
 	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
-	spin_unlock(&kvm->mmu_lock);
+	write_unlock(&kvm->mmu_lock);
 }
 
 /*
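
As the commit message says, the read side is exercised by the follow-up
patch in the linked series. A hedged sketch of what a dirty-logging
fast path can look like under the read lock; the function and its
control flow are illustrative, with kvm_pgtable_stage2_relax_perms()
being the pre-existing helper such a path would call:

    /* Illustrative sketch (needs <linux/kvm_host.h> and
     * <asm/kvm_pgtable.h>): relaxing permissions only flips access bits
     * on an existing mapping, so it can run under the read lock while
     * other vCPUs take the same path in parallel. */
    static int demo_relax_perms_fast(struct kvm_vcpu *vcpu,
				     phys_addr_t fault_ipa,
				     enum kvm_pgtable_prot prot)
    {
            struct kvm *kvm = vcpu->kvm;
            int ret;

            read_lock(&kvm->mmu_lock);
            ret = kvm_pgtable_stage2_relax_perms(vcpu->arch.hw_mmu->pgt,
						 fault_ipa, prot);
            read_unlock(&kvm->mmu_lock);

            return ret;
    }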
