rqspinlock: Hardcode cond_acquire loops for arm64

Currently, for rqspinlock usage, the implementations of
smp_cond_load_acquire (and thus atomic_cond_read_acquire) are
susceptible to stalls on arm64, because they do not guarantee that the
conditional expression will be repeatedly invoked if the address being
loaded from is not written to by other CPUs. When event-stream support
(which unblocks stuck WFE-based loops every ~100us) is absent, we may
end up stuck forever.

This causes a problem for us, as we need to repeatedly invoke
RES_CHECK_TIMEOUT in the spin loop to break out when the timeout
expires.
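
To see why, consider the shape of the WFE-based wait (a simplified
sketch, not the exact arm64 implementation):

  for (;;) {
          VAL = smp_load_acquire(ptr);
          if (cond_expr)                  /* our timeout check lives here */
                  break;
          /* WFE: sleeps until *ptr changes or the event stream ticks */
          __cmpwait_relaxed(ptr, VAL);
  }

With no stores to *ptr and no event stream, the loop parks in WFE and
never comes back around to evaluate cond_expr, so a timeout folded into
it is never noticed.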

Let us import the smp_cond_load_acquire_timewait implementation Ankur is
proposing in [0], and then fall back to it once it is merged.

While we rely on the implementation to amortize the cost of sampling
check_timeout for us, that amortization will not happen when event
stream support is unavailable: the spin-wait fallback re-evaluates the
conditional expression (and thus check_timeout) on every iteration, and
only amortizes its separate time_expr check. This is not the common
case, and it would be difficult to fit our logic into the
time_expr_ns >= time_limit_ns comparison, hence just let it be.

  [0]: https://lore.kernel.org/lkml/20250203214911.898276-1-ankur.a.arora@oracle.com

Cc: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-9-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Kumar Kartikeya Dwivedi authored and Alexei Starovoitov committed Mar 19, 2025
1 parent 14c48ee commit ebababc
Showing 2 changed files with 108 additions and 0 deletions.
93 changes: 93 additions & 0 deletions arch/arm64/include/asm/rqspinlock.h
@@ -0,0 +1,93 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RQSPINLOCK_H
#define _ASM_RQSPINLOCK_H

#include <asm/barrier.h>

/*
 * Hardcode res_smp_cond_load_acquire implementations for arm64 to a custom
 * version based on [0]. In rqspinlock code, our conditional expression involves
 * checking the value _and_ additionally a timeout. However, on arm64, the
 * WFE-based implementation may never spin again if no stores occur to the
 * locked byte in the lock word. As such, we may be stuck forever if
 * event-stream based unblocking is not available on the platform for WFE spin
 * loops (arch_timer_evtstrm_available).
 *
 * Once support for smp_cond_load_acquire_timewait [0] lands, we can drop this
 * copy-paste.
 *
 * While we rely on the implementation to amortize the cost of sampling
 * cond_expr for us, that will not happen when event-stream support is
 * unavailable: the spin-wait fallback re-evaluates cond_expr on every
 * iteration and only amortizes its separate time_expr check. This is not the
 * common case, and it would be difficult to fit our logic into the
 * time_expr_ns >= time_limit_ns comparison, hence just let it be. In case of
 * event-stream, the loop is woken up at microsecond granularity.
 *
 * [0]: https://lore.kernel.org/lkml/20250203214911.898276-1-ankur.a.arora@oracle.com
 */

#ifndef smp_cond_load_acquire_timewait

#define smp_cond_time_check_count 200

/*
 * Relaxed spin-wait fallback: re-evaluate cond_expr on every iteration,
 * but sample time_expr_ns only once per smp_cond_time_check_count spins
 * to amortize its cost.
 */
#define __smp_cond_load_relaxed_spinwait(ptr, cond_expr, time_expr_ns,  \
                                         time_limit_ns) ({              \
        typeof(ptr) __PTR = (ptr);                                      \
        __unqual_scalar_typeof(*ptr) VAL;                               \
        unsigned int __count = 0;                                       \
        for (;;) {                                                      \
                VAL = READ_ONCE(*__PTR);                                \
                if (cond_expr)                                          \
                        break;                                          \
                cpu_relax();                                            \
                if (__count++ < smp_cond_time_check_count)              \
                        continue;                                       \
                if ((time_expr_ns) >= (time_limit_ns))                  \
                        break;                                          \
                __count = 0;                                            \
        }                                                               \
        (typeof(*ptr))VAL;                                              \
})

/*
 * WFE-based wait: park in __cmpwait_relaxed() until a store to *ptr (or
 * the event stream, when available) wakes us, then re-check cond_expr
 * and the time limit.
 */
#define __smp_cond_load_acquire_timewait(ptr, cond_expr,                \
                                         time_expr_ns, time_limit_ns)   \
({                                                                      \
        typeof(ptr) __PTR = (ptr);                                      \
        __unqual_scalar_typeof(*ptr) VAL;                               \
        for (;;) {                                                      \
                VAL = smp_load_acquire(__PTR);                          \
                if (cond_expr)                                          \
                        break;                                          \
                __cmpwait_relaxed(__PTR, VAL);                          \
                if ((time_expr_ns) >= (time_limit_ns))                  \
                        break;                                          \
        }                                                               \
        (typeof(*ptr))VAL;                                              \
})

#define smp_cond_load_acquire_timewait(ptr, cond_expr,                  \
                                       time_expr_ns, time_limit_ns)     \
({                                                                      \
        __unqual_scalar_typeof(*ptr) _val;                              \
        /* Use the WFE loop only if the event stream can unblock it. */ \
        int __wfe = arch_timer_evtstrm_available();                     \
                                                                        \
        if (likely(__wfe)) {                                            \
                _val = __smp_cond_load_acquire_timewait(ptr, cond_expr, \
                                                        time_expr_ns,   \
                                                        time_limit_ns); \
        } else {                                                        \
                _val = __smp_cond_load_relaxed_spinwait(ptr, cond_expr, \
                                                        time_expr_ns,   \
                                                        time_limit_ns); \
                /* The relaxed loop lacks acquire; supply it here. */   \
                smp_acquire__after_ctrl_dep();                          \
        }                                                               \
        (typeof(*ptr))_val;                                             \
})

#endif

#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
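
/*
 * The (0, 1) arguments pass time_expr_ns = 0 and time_limit_ns = 1, so the
 * macro's internal 0 >= 1 timeout check never fires; the actual deadline is
 * enforced by the caller's conditional expression (RES_CHECK_TIMEOUT in the
 * rqspinlock slow path).
 */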

#include <asm-generic/rqspinlock.h>

#endif /* _ASM_RQSPINLOCK_H */
15 changes: 15 additions & 0 deletions kernel/bpf/rqspinlock.c
@@ -92,12 +92,21 @@ static noinline int check_timeout(struct rqspinlock_timeout *ts)
        return 0;
}

/*
* Do not amortize with spins when res_smp_cond_load_acquire is defined,
* as the macro does internal amortization for us.
*/
#ifndef res_smp_cond_load_acquire
#define RES_CHECK_TIMEOUT(ts, ret)                    \
        ({                                            \
                if (!(ts).spin++)                     \
                        (ret) = check_timeout(&(ts)); \
                (ret);                                \
        })
#else
#define RES_CHECK_TIMEOUT(ts, ret)                    \
        ({ (ret) = check_timeout(&(ts)); })
#endif
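
/*
 * Usage sketch (hypothetical, adapted from the slow path): the timeout
 * check rides along inside the cond-load's conditional expression, e.g.:
 *
 *        val = res_atomic_cond_read_acquire(&lock->val,
 *                                           !(VAL & _Q_LOCKED_PENDING_MASK) ||
 *                                           RES_CHECK_TIMEOUT(ts, ret));
 */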

/*
* Initialize the 'spin' member.
@@ -118,6 +127,12 @@ static noinline int check_timeout(struct rqspinlock_timeout *ts)
*/
static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);

#ifndef res_smp_cond_load_acquire
#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire(v, c)
#endif

#define res_atomic_cond_read_acquire(v, c) res_smp_cond_load_acquire(&(v)->counter, (c))
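
/*
 * Like atomic_cond_read_acquire() in the generic atomic headers, the atomic_t
 * variant reaches into ->counter so the load macro can operate on the
 * underlying scalar.
 */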

/**
* resilient_queued_spin_lock_slowpath - acquire the queued spinlock
* @lock: Pointer to queued spinlock structure
