Skip to content

Commit

Permalink
KVM: Move wiping of the kvm->vcpus array to common code
Browse files Browse the repository at this point in the history
All architectures have similar loops iterating over the vcpus,
freeing one vcpu at a time, and eventually wiping the reference
off the vcpus array. They are also inconsistently taking
the kvm->lock mutex when wiping the references from the array.

Make this code common, which will simplify further changes.
The locking is dropped altogether, as this should only be called
when there are no further references to the kvm structure.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Message-Id: <20211116160403.4074052-2-maz@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
  • Loading branch information
Marc Zyngier authored and Paolo Bonzini committed Dec 8, 2021
1 parent dc1ce45 commit 27592ae
Show file tree
Hide file tree
Showing 8 changed files with 22 additions and 75 deletions.
10 changes: 1 addition & 9 deletions arch/arm64/kvm/arm.c
Original file line number Diff line number Diff line change
Expand Up @@ -175,19 +175,11 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
*/
void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;

bitmap_free(kvm->arch.pmu_filter);

kvm_vgic_destroy(kvm);

for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_vcpu_destroy(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
atomic_set(&kvm->online_vcpus, 0);
kvm_destroy_vcpus(kvm);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Expand Down
21 changes: 1 addition & 20 deletions arch/mips/kvm/mips.c
Original file line number Diff line number Diff line change
Expand Up @@ -171,25 +171,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return 0;
}

/*
 * Tear down every vCPU of @kvm: destroy each vCPU, then wipe the
 * kvm->vcpus[] references and reset the online-vCPU count.
 */
void kvm_mips_free_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;

/* Destroy each vCPU first; the array slots still hold stale pointers. */
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_vcpu_destroy(vcpu);
}

/* Wipe the now-dangling references under kvm->lock. */
mutex_lock(&kvm->lock);

for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm->vcpus[i] = NULL;

/* No vCPUs remain online once the array has been cleared. */
atomic_set(&kvm->online_vcpus, 0);

mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
/* It should always be safe to remove after flushing the whole range */
Expand All @@ -199,7 +180,7 @@ static void kvm_mips_free_gpa_pt(struct kvm *kvm)

void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_mips_free_vcpus(kvm);
kvm_destroy_vcpus(kvm);
kvm_mips_free_gpa_pt(kvm);
}

Expand Down
10 changes: 1 addition & 9 deletions arch/powerpc/kvm/powerpc.c
Original file line number Diff line number Diff line change
Expand Up @@ -463,9 +463,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

void kvm_arch_destroy_vm(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
/*
* We call kick_all_cpus_sync() to ensure that all
Expand All @@ -476,14 +473,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kick_all_cpus_sync();
#endif

kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vcpu_destroy(vcpu);
kvm_destroy_vcpus(kvm);

mutex_lock(&kvm->lock);
for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm->vcpus[i] = NULL;

atomic_set(&kvm->online_vcpus, 0);

kvmppc_core_destroy_vm(kvm);

Expand Down
10 changes: 1 addition & 9 deletions arch/riscv/kvm/vm.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,15 +46,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;

for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_vcpu_destroy(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
atomic_set(&kvm->online_vcpus, 0);
kvm_destroy_vcpus(kvm);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Expand Down
18 changes: 1 addition & 17 deletions arch/s390/kvm/kvm-s390.c
Original file line number Diff line number Diff line change
Expand Up @@ -2821,27 +2821,11 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
free_page((unsigned long)(vcpu->arch.sie_block));
}

/*
 * Destroy all vCPUs of @kvm, then clear the kvm->vcpus[] references
 * and reset the online-vCPU count.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;

/* Free each vCPU; array entries are stale after this loop. */
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vcpu_destroy(vcpu);

/* Wipe the stale references while holding kvm->lock. */
mutex_lock(&kvm->lock);
for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm->vcpus[i] = NULL;

atomic_set(&kvm->online_vcpus, 0);
mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
u16 rc, rrc;

kvm_free_vcpus(kvm);
kvm_destroy_vcpus(kvm);
sca_dispose(kvm);
kvm_s390_gisa_destroy(kvm);
/*
Expand Down
9 changes: 1 addition & 8 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -11423,15 +11423,8 @@ static void kvm_free_vcpus(struct kvm *kvm)
kvm_clear_async_pf_completion_queue(vcpu);
kvm_unload_vcpu_mmu(vcpu);
}
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vcpu_destroy(vcpu);

mutex_lock(&kvm->lock);
for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm->vcpus[i] = NULL;

atomic_set(&kvm->online_vcpus, 0);
mutex_unlock(&kvm->lock);
kvm_destroy_vcpus(kvm);
}

void kvm_arch_sync_events(struct kvm *kvm)
Expand Down
2 changes: 1 addition & 1 deletion include/linux/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -733,7 +733,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
if (WARN_ON_ONCE(!memslot->npages)) { \
} else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_destroy_vcpus(struct kvm *kvm);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
Expand Down
17 changes: 15 additions & 2 deletions virt/kvm/kvm_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -435,7 +435,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu->last_used_slot = 0;
}

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kvm_dirty_ring_free(&vcpu->dirty_ring);
kvm_arch_vcpu_destroy(vcpu);
Expand All @@ -450,7 +450,20 @@ void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
free_page((unsigned long)vcpu->run);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);

/*
 * Common helper replacing the per-arch vCPU teardown loops: destroy every
 * vCPU, clear its kvm->vcpus[] slot, and reset the online-vCPU count.
 *
 * NOTE(review): no locking is taken here — per the commit message, callers
 * must invoke this only when no further references to @kvm remain.
 */
void kvm_destroy_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;

/* Destroy each vCPU and immediately wipe its array slot. */
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_vcpu_destroy(vcpu);
kvm->vcpus[i] = NULL;
}

/* All slots cleared: report zero online vCPUs. */
atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
Expand Down

0 comments on commit 27592ae

Please sign in to comment.