x86, ticketlock: Make __ticket_spin_trylock common
Make trylock code common regardless of ticket size.

(Also, rename arch_spinlock.slock to head_tail.)

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Jeremy Fitzhardinge authored and H. Peter Anvin committed Aug 29, 2011
1 parent 2994488 commit 229855d
Showing 2 changed files with 16 additions and 41 deletions.
51 changes: 12 additions & 39 deletions arch/x86/include/asm/spinlock.h
@@ -69,60 +69,33 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
-#if (NR_CPUS < 256)
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned int tmp, new;
-
-	asm volatile("movzwl %2, %0\n\t"
-		     "cmpb %h0,%b0\n\t"
-		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
+	arch_spinlock_t old, new;
+
+	old.tickets = ACCESS_ONCE(lock->tickets);
+	if (old.tickets.head != old.tickets.tail)
+		return 0;
 
-	return tmp;
+	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+
+	/* cmpxchg is a full barrier, so nothing can move before it */
+	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
+#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
 #else
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned tmp;
-	unsigned new;
-
-	asm volatile("movl %2,%0\n\t"
-		     "movl %0,%1\n\t"
-		     "roll $16, %0\n\t"
-		     "cmpl %0,%1\n\t"
-		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
-		     "jne 1f\n\t"
-		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
-		     "1:"
-		     "sete %b1\n\t"
-		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
-		     :
-		     : "memory", "cc");
-
-	return tmp;
-}
-
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->slock)
+		     : "+m" (lock->head_tail)
 		     :
 		     : "memory", "cc");
 }
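The new trylock takes one snapshot of head and tail, bails out if the lock is contended, and otherwise claims the next ticket with a single compare-and-swap on the combined word. Below is a minimal userspace sketch of the same logic; the names spinlock_mock_t and trylock_mock are hypothetical, the u8/u16 sizing assumes NR_CPUS < 256, and GCC's __sync_val_compare_and_swap stands in for the kernel's cmpxchg():

#include <stdint.h>
#include <stdio.h>

#define TICKET_SHIFT 8			/* sizeof(uint8_t) * 8 */

typedef struct {
	union {
		uint16_t head_tail;
		struct {
			uint8_t head, tail;	/* head is the low byte on little-endian x86 */
		} tickets;
	};
} spinlock_mock_t;

static int trylock_mock(spinlock_mock_t *lock)
{
	spinlock_mock_t old, new;

	/* One racy snapshot, like ACCESS_ONCE() in the patch. */
	old.head_tail = *(volatile uint16_t *)&lock->head_tail;
	if (old.tickets.head != old.tickets.tail)
		return 0;		/* held or queued: give up immediately */

	/* Claim the next ticket by bumping tail; head is untouched. */
	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* Full barrier, like cmpxchg(); succeeds only if nobody raced us. */
	return __sync_val_compare_and_swap(&lock->head_tail,
					   old.head_tail,
					   new.head_tail) == old.head_tail;
}

int main(void)
{
	spinlock_mock_t lock = { { 0 } };

	printf("first trylock:  %d\n", trylock_mock(&lock));	/* 1: acquired */
	printf("second trylock: %d\n", trylock_mock(&lock));	/* 0: head != tail */
	return 0;
}

This is the payoff of the rewrite: one C function covers both ticket sizes, with only TICKET_SHIFT and the typedefs varying, where the old code needed a separate hand-written asm sequence for byte and word tickets.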
6 changes: 4 additions & 2 deletions arch/x86/include/asm/spinlock_types.h
@@ -9,23 +9,25 @@

 #if (CONFIG_NR_CPUS < 256)
 typedef u8  __ticket_t;
+typedef u16 __ticketpair_t;
 #else
 typedef u16 __ticket_t;
+typedef u32 __ticketpair_t;
 #endif
 
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 #define TICKET_MASK	((__ticket_t)((1 << TICKET_SHIFT) - 1))
 
 typedef struct arch_spinlock {
 	union {
-		unsigned int slock;
+		__ticketpair_t head_tail;
 		struct __raw_tickets {
 			__ticket_t head, tail;
 		} tickets;
 	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ { .slock = 0 } }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }
 
 #include <asm/rwlock.h>

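The union is what makes the single-word cmpxchg legal: head_tail aliases the {head, tail} pair, and since x86 is little-endian, head occupies the low byte. Adding 1 << TICKET_SHIFT therefore bumps tail alone, while the unlock path's "incb %0" on the lock word advances head. A tiny sketch of that arithmetic (hypothetical demo, same u8/u16 sizing, little-endian assumed):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	union {
		uint16_t head_tail;
		struct { uint8_t head, tail; } tickets;	/* little-endian layout */
	} lock = { 0 };

	lock.head_tail += 1 << 8;	/* take a ticket: tail (high byte) only */
	assert(lock.tickets.head == 0 && lock.tickets.tail == 1);

	lock.tickets.head++;		/* unlock, as "incb %0" does on the low byte */
	assert(lock.tickets.head == lock.tickets.tail);	/* lock is free again */
	return 0;
}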
