x86: KVM: SVM: use kvm_lock_all_vcpus instead of a custom implementation
Use kvm_lock_all_vcpus instead of sev's own implementation.

Because kvm_lock_all_vcpus uses the _nest_lock feature of lockdep, which
ignores subclasses, there is no longer a need to use separate subclasses
for source and target VMs.

No functional change intended.
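
For context: kvm_lock_all_vcpus takes every vCPU mutex while holding kvm->lock and passes kvm->lock to lockdep as the nest lock, so lockdep folds all of the per-vCPU acquisitions into a single one and per-role subclasses become unnecessary. Below is a rough sketch of the helper, based on the version added earlier in this same series in virt/kvm/kvm_main.c; treat it as illustrative rather than the exact upstream code.

/*
 * Sketch of kvm_lock_all_vcpus() (illustrative, not verbatim upstream).
 * Each vCPU mutex is taken under kvm->lock as a lockdep nest lock, so
 * lockdep permits any number of vcpu->mutex acquisitions as long as
 * kvm->lock is held -- no subclass juggling required.
 */
int kvm_lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;
	int r;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
		if (r)
			goto out_unlock;
	}
	return 0;

out_unlock:
	/* Unwind: drop every vCPU mutex taken before the failure. */
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return r;
}

This is why the diff below can delete the SEV_MIGRATION_SOURCE/SEV_MIGRATION_TARGET roles outright: with a nest lock there is only one acquisition as far as lockdep is concerned, so the source and target VMs no longer need distinct mutex subclasses.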

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Message-ID: <20250512180407.659015-5-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Maxim Levitsky authored and Paolo Bonzini committed May 27, 2025
1 parent e4a454c commit c560bc9
Showing 1 changed file with 4 additions and 68 deletions.
72 changes: 4 additions & 68 deletions arch/x86/kvm/svm/sev.c
@@ -1882,70 +1882,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 	atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
-/* vCPU mutex subclasses. */
-enum sev_migration_role {
-	SEV_MIGRATION_SOURCE = 0,
-	SEV_MIGRATION_TARGET,
-	SEV_NR_MIGRATION_ROLES,
-};
-
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-					enum sev_migration_role role)
-{
-	struct kvm_vcpu *vcpu;
-	unsigned long i, j;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (mutex_lock_killable_nested(&vcpu->mutex, role))
-			goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
-		if (!i)
-			/*
-			 * Reset the role to one that avoids colliding with
-			 * the role used for the first vcpu mutex.
-			 */
-			role = SEV_NR_MIGRATION_ROLES;
-		else
-			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
-	}
-
-	return 0;
-
-out_unlock:
-
-	kvm_for_each_vcpu(j, vcpu, kvm) {
-		if (i == j)
-			break;
-
-#ifdef CONFIG_PROVE_LOCKING
-		if (j)
-			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
-		mutex_unlock(&vcpu->mutex);
-	}
-	return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
-	struct kvm_vcpu *vcpu;
-	unsigned long i;
-	bool first = true;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (first)
-			first = false;
-		else
-			mutex_acquire(&vcpu->mutex.dep_map,
-				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
-		mutex_unlock(&vcpu->mutex);
-	}
-}
-
 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
 	struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2083,10 +2019,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 		charged = true;
 	}
 
-	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+	ret = kvm_lock_all_vcpus(kvm);
 	if (ret)
 		goto out_dst_cgroup;
-	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+	ret = kvm_lock_all_vcpus(source_kvm);
 	if (ret)
 		goto out_dst_vcpu;
 
@@ -2100,9 +2036,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 	ret = 0;
 
 out_source_vcpu:
-	sev_unlock_vcpus_for_migration(source_kvm);
+	kvm_unlock_all_vcpus(source_kvm);
 out_dst_vcpu:
-	sev_unlock_vcpus_for_migration(kvm);
+	kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
 	/* Operates on the source on success, on the destination on failure. */
 	if (charged)