Skip to content

Commit

Permalink
locking/local_lock: Introduce localtry_lock_t
Browse files Browse the repository at this point in the history
In !PREEMPT_RT local_lock_irqsave() disables interrupts to protect
critical section, but it doesn't prevent NMI, so the fully reentrant
code cannot use local_lock_irqsave() for exclusive access.

Introduce localtry_lock_t and localtry_lock_irqsave() that
disables interrupts and sets acquired=1, so that a subsequent
localtry_trylock_irqsave() from NMI attempting to acquire the same
lock will return false.

In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
Map localtry_lock_irqsave() to preemptible spin_trylock().
When in hard IRQ or NMI return false right away, since
spin_trylock() is not safe there due to explicit locking in the
underlying rt_spin_trylock() implementation. Removing this explicit
locking and attempting only "trylock" is undesired due to PI implications.

Note there is no need to use local_inc for acquired variable,
since it's a percpu variable with strict nesting scopes.

Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/r/20250222024427.30294-2-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
  • Loading branch information
Sebastian Andrzej Siewior authored and Alexei Starovoitov committed Feb 27, 2025
1 parent 2014c95 commit 0aaddfb
Show file tree
Hide file tree
Showing 2 changed files with 216 additions and 0 deletions.
70 changes: 70 additions & 0 deletions include/linux/local_lock.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,76 @@
#define local_unlock_irqrestore(lock, flags) \
__local_unlock_irqrestore(lock, flags)

/**
 * localtry_lock_init - Runtime initialize a lock instance
 * @lock:	The lock variable
 */
#define localtry_lock_init(lock)	__localtry_lock_init(lock)

/**
 * localtry_lock - Acquire a per CPU local lock
 * @lock:	The lock variable
 */
#define localtry_lock(lock)		__localtry_lock(lock)

/**
 * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
 * @lock:	The lock variable
 */
#define localtry_lock_irq(lock)		__localtry_lock_irq(lock)

/**
 * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
 *			   interrupts
 * @lock:	The lock variable
 * @flags:	Storage for interrupt flags
 */
#define localtry_lock_irqsave(lock, flags)				\
	__localtry_lock_irqsave(lock, flags)

/**
 * localtry_trylock - Try to acquire a per CPU local lock.
 * @lock:	The lock variable
 *
 * The function can be used in any context such as NMI or HARDIRQ. Due to
 * locking constraints it will _always_ fail to acquire the lock in NMI or
 * HARDIRQ context on PREEMPT_RT.
 */
#define localtry_trylock(lock)		__localtry_trylock(lock)

/**
 * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
 *			      interrupts if acquired
 * @lock:	The lock variable
 * @flags:	Storage for interrupt flags
 *
 * The function can be used in any context such as NMI or HARDIRQ. Due to
 * locking constraints it will _always_ fail to acquire the lock in NMI or
 * HARDIRQ context on PREEMPT_RT.
 */
#define localtry_trylock_irqsave(lock, flags)				\
	__localtry_trylock_irqsave(lock, flags)

/**
 * localtry_unlock - Release a per CPU local lock
 * @lock:	The lock variable
 */
#define localtry_unlock(lock)		__localtry_unlock(lock)

/**
 * localtry_unlock_irq - Release a per CPU local lock and enable interrupts
 * @lock:	The lock variable
 */
#define localtry_unlock_irq(lock)	__localtry_unlock_irq(lock)

/**
 * localtry_unlock_irqrestore - Release a per CPU local lock and restore
 *				interrupt flags
 * @lock:	The lock variable
 * @flags:	Interrupt flags to restore
 */
#define localtry_unlock_irqrestore(lock, flags)				\
	__localtry_unlock_irqrestore(lock, flags)

/* Scope-based guard: pairs local_lock() with local_unlock() on scope exit. */
DEFINE_GUARD(local_lock, local_lock_t __percpu*,
	     local_lock(_T),
	     local_unlock(_T))
Expand Down
146 changes: 146 additions & 0 deletions include/linux/local_lock_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,11 @@ typedef struct {
#endif
} local_lock_t;

/*
 * A local_lock_t plus an 'acquired' flag.  The flag lets a trylock
 * attempt (e.g. from NMI) detect that this CPU already holds the lock.
 */
typedef struct {
	local_lock_t	llock;
	unsigned int	acquired;
} localtry_lock_t;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
.dep_map = { \
Expand All @@ -31,6 +36,13 @@ static inline void local_lock_acquire(local_lock_t *l)
l->owner = current;
}

/* Like local_lock_acquire() but uses a lockdep try-acquire annotation. */
static inline void local_trylock_acquire(local_lock_t *l)
{
	lock_map_acquire_try(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_lock_release(local_lock_t *l)
{
DEBUG_LOCKS_WARN_ON(l->owner != current);
Expand All @@ -45,11 +57,13 @@ static inline void local_lock_debug_init(local_lock_t *l)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
/* Lock debugging disabled: ownership/lockdep tracking compiles to no-ops. */
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
/* 'acquired' is left out and therefore zero-initialized by C aggregate rules. */
#define INIT_LOCALTRY_LOCK(lockname)	{ .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}

#define __local_lock_init(lock) \
do { \
Expand Down Expand Up @@ -118,15 +132,115 @@ do { \
#define __local_unlock_nested_bh(lock) \
local_lock_release(this_cpu_ptr(lock))

/* localtry_lock_t variants */

#define __localtry_lock_init(lock)					\
do {									\
	__local_lock_init(&(lock)->llock);				\
	WRITE_ONCE((lock)->acquired, 0);				\
} while (0)

/*
 * Disable preemption, then mark the lock owned.  A plain WRITE_ONCE is
 * sufficient for 'acquired': it is a per-CPU variable with strictly
 * nested acquire scopes, so no atomic RMW is needed.
 */
#define __localtry_lock(lock)						\
	do {								\
		localtry_lock_t	*lt;					\
		preempt_disable();					\
		lt = this_cpu_ptr(lock);				\
		local_lock_acquire(&lt->llock);				\
		WRITE_ONCE(lt->acquired, 1);				\
	} while (0)

/* As __localtry_lock() but excludes hard interrupts as well. */
#define __localtry_lock_irq(lock)					\
	do {								\
		localtry_lock_t	*lt;					\
		local_irq_disable();					\
		lt = this_cpu_ptr(lock);				\
		local_lock_acquire(&lt->llock);				\
		WRITE_ONCE(lt->acquired, 1);				\
	} while (0)

/* As __localtry_lock_irq() but saves the previous interrupt state in @flags. */
#define __localtry_lock_irqsave(lock, flags)				\
	do {								\
		localtry_lock_t	*lt;					\
		local_irq_save(flags);					\
		lt = this_cpu_ptr(lock);				\
		local_lock_acquire(&lt->llock);				\
		WRITE_ONCE(lt->acquired, 1);				\
	} while (0)

/*
 * Trylock with preemption disabled.  'acquired' is set before returning
 * true, so an NMI hitting this CPU afterwards observes acquired == 1
 * and its own trylock fails instead of reentering the critical section.
 */
#define __localtry_trylock(lock)					\
	({								\
		localtry_lock_t	*lt;					\
		bool _ret;						\
									\
		preempt_disable();					\
		lt = this_cpu_ptr(lock);				\
		if (!READ_ONCE(lt->acquired)) {				\
			WRITE_ONCE(lt->acquired, 1);			\
			local_trylock_acquire(&lt->llock);		\
			_ret = true;					\
		} else {						\
			_ret = false;					\
			preempt_enable();				\
		}							\
		_ret;							\
	})

/* As __localtry_trylock() but interrupts stay disabled on success. */
#define __localtry_trylock_irqsave(lock, flags)				\
	({								\
		localtry_lock_t	*lt;					\
		bool _ret;						\
									\
		local_irq_save(flags);					\
		lt = this_cpu_ptr(lock);				\
		if (!READ_ONCE(lt->acquired)) {				\
			WRITE_ONCE(lt->acquired, 1);			\
			local_trylock_acquire(&lt->llock);		\
			_ret = true;					\
		} else {						\
			_ret = false;					\
			local_irq_restore(flags);			\
		}							\
		_ret;							\
	})

/* Clear ownership while still non-preemptible; counterpart of __localtry_lock(). */
#define __localtry_unlock(lock)						\
	do {								\
		localtry_lock_t	*lt;					\
		lt = this_cpu_ptr(lock);				\
		WRITE_ONCE(lt->acquired, 0);				\
		local_lock_release(&lt->llock);				\
		preempt_enable();					\
	} while (0)

/* Counterpart of __localtry_lock_irq(). */
#define __localtry_unlock_irq(lock)					\
	do {								\
		localtry_lock_t	*lt;					\
		lt = this_cpu_ptr(lock);				\
		WRITE_ONCE(lt->acquired, 0);				\
		local_lock_release(&lt->llock);				\
		local_irq_enable();					\
	} while (0)

/* Counterpart of __localtry_lock_irqsave() / __localtry_trylock_irqsave(). */
#define __localtry_unlock_irqrestore(lock, flags)			\
	do {								\
		localtry_lock_t	*lt;					\
		lt = this_cpu_ptr(lock);				\
		WRITE_ONCE(lt->acquired, 0);				\
		local_lock_release(&lt->llock);				\
		local_irq_restore(flags);				\
	} while (0)

#else /* !CONFIG_PREEMPT_RT */

/*
* On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
* critical section while staying preemptible.
*/
typedef spinlock_t local_lock_t;
/*
 * On RT, localtry_lock_t is the same per-CPU spinlock: spin_trylock()
 * itself reports contention, so no separate 'acquired' flag is needed.
 */
typedef spinlock_t localtry_lock_t;

#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)

#define __local_lock_init(l) \
do { \
Expand Down Expand Up @@ -169,4 +283,36 @@ do { \
spin_unlock(this_cpu_ptr((lock))); \
} while (0)

/* localtry_lock_t variants */

/*
 * On PREEMPT_RT the non-try operations map directly onto the local_lock
 * implementations above; note the _irq lock/unlock variants fall through
 * to the plain (non-irq-disabling) forms.
 */
#define __localtry_lock_init(lock)			__local_lock_init(lock)
#define __localtry_lock(lock)				__local_lock(lock)
#define __localtry_lock_irq(lock)			__local_lock(lock)
#define __localtry_lock_irqsave(lock, flags)		__local_lock_irqsave(lock, flags)
#define __localtry_unlock(lock)				__local_unlock(lock)
#define __localtry_unlock_irq(lock)			__local_unlock(lock)
#define __localtry_unlock_irqrestore(lock, flags)	__local_unlock_irqrestore(lock, flags)

/*
 * RT: fail right away in hard IRQ and NMI context, since spin_trylock()
 * is not safe there due to explicit locking in the underlying
 * rt_spin_trylock() implementation (and dropping that locking would
 * have PI implications).  Otherwise pin the task to this CPU and try
 * the per-CPU spinlock.
 */
#define __localtry_trylock(lock)					\
	({								\
		int __locked;						\
									\
		if (in_nmi() || in_hardirq()) {				\
			__locked = 0;					\
		} else {						\
			migrate_disable();				\
			__locked = spin_trylock(this_cpu_ptr((lock)));	\
			if (!__locked)					\
				migrate_enable();			\
		}							\
		__locked;						\
	})

/*
 * RT: interrupts are not disabled, so there is no interrupt state to
 * save; 'flags' is only type-checked and zeroed so the caller's
 * matching unlock_irqrestore() path stays well defined.
 */
#define __localtry_trylock_irqsave(lock, flags)				\
	({								\
		typecheck(unsigned long, flags);			\
		flags = 0;						\
		__localtry_trylock(lock);				\
	})

#endif /* CONFIG_PREEMPT_RT */

0 comments on commit 0aaddfb

Please sign in to comment.