Skip to content

Commit

Permalink
powerpc/qspinlock: store owner CPU in lock word
Browse files Browse the repository at this point in the history
Store the owner CPU number in the lock word so it may be yielded to,
as powerpc's paravirtualised simple spinlocks do.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-7-npiggin@gmail.com
  • Loading branch information
Nicholas Piggin authored and Michael Ellerman committed Dec 2, 2022
1 parent 0944534 commit e1a31e7
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 4 deletions.
12 changes: 10 additions & 2 deletions arch/powerpc/include/asm/qspinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,15 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
return !!(READ_ONCE(lock->val) & _Q_TAIL_CPU_MASK);
}

/*
 * Build the 32-bit lock word a CPU stores when it takes the lock:
 * the locked bit plus this CPU's number in the owner-cpu bitfield,
 * so a waiter can later identify (and yield to) the lock holder.
 */
static __always_inline u32 queued_spin_encode_locked_val(void)
{
	/* XXX: make this use lock value in paca like simple spinlocks? */
	u32 owner = (u32)smp_processor_id() << _Q_OWNER_CPU_OFFSET;

	return owner | _Q_LOCKED_VAL;
}

static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
u32 new = queued_spin_encode_locked_val();
u32 prev;

asm volatile(
Expand All @@ -34,7 +41,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
"\t" PPC_ACQUIRE_BARRIER " \n"
"2: \n"
: "=&r" (prev)
: "r" (&lock->val), "r" (_Q_LOCKED_VAL),
: "r" (&lock->val), "r" (new),
"i" (IS_ENABLED(CONFIG_PPC64))
: "cr0", "memory");

Expand All @@ -43,6 +50,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)

static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
{
u32 new = queued_spin_encode_locked_val();
u32 prev, tmp;

/* Trylock may get ahead of queued nodes if it finds unlocked */
Expand All @@ -57,7 +65,7 @@ static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
"\t" PPC_ACQUIRE_BARRIER " \n"
"2: \n"
: "=&r" (prev), "=&r" (tmp)
: "r" (&lock->val), "r" (_Q_LOCKED_VAL), "r" (_Q_TAIL_CPU_MASK),
: "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK),
"i" (IS_ENABLED(CONFIG_PPC64))
: "cr0", "memory");

Expand Down
12 changes: 11 additions & 1 deletion arch/powerpc/include/asm/qspinlock_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@ typedef struct qspinlock {
* Bitfields in the lock word:
*
* 0: locked bit
* 1-15: unused bits
* 1-14: lock holder cpu
* 15: unused bit
* 16: must queue bit
* 17-31: tail cpu (+1)
*/
Expand All @@ -40,6 +41,15 @@ typedef struct qspinlock {
#define _Q_LOCKED_BITS 1
#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)

/*
 * Owner-CPU bitfield: bits 1-14 of the lock word (mask 0x00007ffe),
 * holding the CPU number of the current lock holder so waiters can
 * yield to it (see the lock-word layout comment above).
 */
/* 0x00007ffe */
#define _Q_OWNER_CPU_OFFSET 1
#define _Q_OWNER_CPU_BITS 14
#define _Q_OWNER_CPU_MASK _Q_SET_MASK(OWNER_CPU)

/*
 * CPU numbers are stored directly (0-based), so every id in
 * [0, CONFIG_NR_CPUS - 1] must fit in the 14-bit field.
 */
#if CONFIG_NR_CPUS > (1U << _Q_OWNER_CPU_BITS)
#error "qspinlock does not support such large CONFIG_NR_CPUS"
#endif

/* 0x00010000 */
#define _Q_MUST_Q_OFFSET 16
#define _Q_MUST_Q_BITS 1
Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/lib/qspinlock.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ static inline int decode_tail_cpu(u32 val)
*/
static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail)
{
u32 newval = _Q_LOCKED_VAL;
u32 newval = queued_spin_encode_locked_val();
u32 prev, tmp;

asm volatile(
Expand Down

0 comments on commit e1a31e7

Please sign in to comment.