kvm: retry nx_huge_page_recovery_thread creation
A VMM may send a non-fatal signal to its threads, including vCPU tasks,
at any time, and thus may signal vCPU tasks during KVM_RUN.  If a vCPU
task receives the signal while it's trying to spawn the huge page recovery
vhost task, then KVM_RUN will fail due to copy_process() returning
-ERESTARTNOINTR.

Rework call_once() to mark the call complete if and only if the called
function succeeds, and plumb the function's true error code back to the
call_once() invoker.  This provides userspace with the correct, non-fatal
error code so that the VMM doesn't terminate the VM on -ENOMEM, and allows
a subsequent KVM_RUN to succeed by virtue of retrying creation of the NX huge
page task.

Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
[implemented the kvm user side]
Signed-off-by: Keith Busch <kbusch@kernel.org>
Message-ID: <20250227230631.303431-3-kbusch@meta.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Keith Busch authored and Paolo Bonzini committed Mar 1, 2025
1 parent cb38090 commit 916b7f4
Showing 2 changed files with 38 additions and 19 deletions.
arch/x86/kvm/mmu/mmu.c (4 additions, 6 deletions)

@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
 	return true;
 }
 
-static void kvm_mmu_start_lpage_recovery(struct once *once)
+static int kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7472,23 +7472,21 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 				      kvm, "kvm-nx-lpage-recovery");
 
 	if (IS_ERR(nx_thread))
-		return;
+		return PTR_ERR(nx_thread);
 
 	vhost_task_start(nx_thread);
 
 	/* Make the task visible only once it is fully started. */
 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+	return 0;
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
 {
 	if (nx_hugepage_mitigation_hard_disabled)
 		return 0;
 
-	call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
-	if (!kvm->arch.nx_huge_page_recovery_thread)
-		return -ENOMEM;
-	return 0;
+	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
 }
 
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
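
For context, a minimal sketch of the VMM side this unblocks, assuming a hypothetical vcpu_run() helper around the KVM_RUN ioctl; the retry policy and error set are illustrative assumptions, not part of this commit. With the fix, a failed attempt to spawn the recovery task surfaces as a non-fatal errno and a later KVM_RUN can succeed:

/* Hypothetical VMM vCPU loop (userspace); exit_reason dispatch omitted. */
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_run(int vcpu_fd)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) == 0)
			return 0;	/* handle run->exit_reason here */
		if (errno == EINTR || errno == ENOMEM)
			continue;	/* non-fatal: retry KVM_RUN */
		return -errno;		/* anything else is fatal for the VM */
	}
}

Previously the first failure was sticky: call_once() marked the initialization complete even when task creation failed, so every subsequent KVM_RUN found a NULL recovery thread and returned -ENOMEM.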
include/linux/call_once.h (34 additions, 13 deletions)

@@ -26,20 +26,41 @@ do {						\
 	__once_init((once), #once, &__key);	\
 } while (0)
 
-static inline void call_once(struct once *once, void (*cb)(struct once *))
+/*
+ * call_once - Ensure a function has been called exactly once
+ *
+ * @once: Tracking struct
+ * @cb: Function to be called
+ *
+ * If @once has never completed successfully before, call @cb and, if
+ * it returns a zero or positive value, mark @once as completed.  Return
+ * the value returned by @cb.
+ *
+ * If @once has completed successfully before, return 0.
+ *
+ * The call to @cb is implicitly surrounded by a mutex, though for
+ * efficiency the function avoids taking it after the first call.
+ */
+static inline int call_once(struct once *once, int (*cb)(struct once *))
 {
-	/* Pairs with atomic_set_release() below. */
-	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
-		return;
-
-	guard(mutex)(&once->lock);
-	WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
-	if (atomic_read(&once->state) != ONCE_NOT_STARTED)
-		return;
-
-	atomic_set(&once->state, ONCE_RUNNING);
-	cb(once);
-	atomic_set_release(&once->state, ONCE_COMPLETED);
+	int r, state;
+
+	/* Pairs with atomic_set_release() below. */
+	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+		return 0;
+
+	guard(mutex)(&once->lock);
+	state = atomic_read(&once->state);
+	if (unlikely(state != ONCE_NOT_STARTED))
+		return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
+
+	atomic_set(&once->state, ONCE_RUNNING);
+	r = cb(once);
+	if (r < 0)
+		atomic_set(&once->state, ONCE_NOT_STARTED);
+	else
+		atomic_set_release(&once->state, ONCE_COMPLETED);
+	return r;
 }
 
 #endif /* _LINUX_CALL_ONCE_H */
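
To make the reworked contract concrete, here is a small userspace model of the same state machine, assuming C11 atomics and a pthread mutex as stand-ins for the kernel's atomic_t and guard(mutex); the names mirror the header, but this is a sketch, not the kernel code:

/* Userspace model of the reworked call_once() (illustrative, not kernel code). */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { ONCE_NOT_STARTED, ONCE_RUNNING, ONCE_COMPLETED };

struct once {
	atomic_int state;
	pthread_mutex_t lock;
};

static int call_once(struct once *once, int (*cb)(struct once *))
{
	int r;

	/* Lockless fast path; pairs with the release store below. */
	if (atomic_load_explicit(&once->state, memory_order_acquire) == ONCE_COMPLETED)
		return 0;

	pthread_mutex_lock(&once->lock);
	if (atomic_load_explicit(&once->state, memory_order_relaxed) != ONCE_NOT_STARTED) {
		/* Someone else completed it while we waited for the lock. */
		pthread_mutex_unlock(&once->lock);
		return 0;
	}

	atomic_store_explicit(&once->state, ONCE_RUNNING, memory_order_relaxed);
	r = cb(once);
	if (r < 0)	/* failure: reset so a future call can retry */
		atomic_store_explicit(&once->state, ONCE_NOT_STARTED, memory_order_relaxed);
	else		/* success: publish completion for the fast path */
		atomic_store_explicit(&once->state, ONCE_COMPLETED, memory_order_release);
	pthread_mutex_unlock(&once->lock);
	return r;
}

static int attempts;

static int flaky_cb(struct once *once)
{
	(void)once;
	return ++attempts < 2 ? -ENOMEM : 0;	/* fail once, then succeed */
}

int main(void)
{
	struct once o = { ONCE_NOT_STARTED, PTHREAD_MUTEX_INITIALIZER };

	printf("%d\n", call_once(&o, flaky_cb));	/* -ENOMEM: first try fails */
	printf("%d\n", call_once(&o, flaky_cb));	/* 0: retried, now completed */
	printf("%d\n", call_once(&o, flaky_cb));	/* 0: fast path, no lock */
	return 0;
}

The key design point is visible in the two stores: a failing callback rolls the state back to ONCE_NOT_STARTED so a later caller retries, while success is published with release ordering to pair with the lockless acquire on the fast path.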
