x86, ticketlock: Collapse a layer of functions
Now that the paravirtualization layer doesn't exist at the spinlock
level any more, we can collapse the __ticket_ functions into the arch_
functions.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-4-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Attilio Rao <attilio.rao@citrix.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Authored by Jeremy Fitzhardinge and committed by H. Peter Anvin on Aug 9, 2013
1 parent 8db7326 commit b798df0
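
The collapse described in the commit message is purely mechanical. As a hypothetical user-space mock (invented mock_* names and lock type, not the kernel code), this is the shape of the forwarding layer that goes away:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical mock lock, standing in for arch_spinlock_t. */
typedef struct { atomic_flag taken; } mock_spinlock_t;

/* Old style: the real body lived under the __ticket_ name ... */
static inline bool __ticket_mock_trylock(mock_spinlock_t *lock)
{
        return !atomic_flag_test_and_set(&lock->taken);
}

/* ... and the arch_ entry point only forwarded to it. */
static inline bool arch_mock_trylock_old(mock_spinlock_t *lock)
{
        return __ticket_mock_trylock(lock);
}

/* New style: the same body, defined directly under the arch_ name. */
static inline bool arch_mock_trylock(mock_spinlock_t *lock)
{
        return !atomic_flag_test_and_set(&lock->taken);
}

int main(void)
{
        mock_spinlock_t lock = { ATOMIC_FLAG_INIT };

        return arch_mock_trylock(&lock) ? 0 : 1;        /* first trylock succeeds */
}

The diff below performs exactly this renaming for arch_spin_lock, arch_spin_trylock, arch_spin_unlock, arch_spin_is_locked and arch_spin_is_contended, deleting the now-pointless wrappers.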
Showing 1 changed file with 5 additions and 30 deletions.
arch/x86/include/asm/spinlock.h (5 additions, 30 deletions)

@@ -76,7 +76,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
         register struct __raw_tickets inc = { .tail = 1 };
 
@@ -96,7 +96,7 @@ static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
 out:    barrier();      /* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         arch_spinlock_t old, new;
 
@@ -110,54 +110,29 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
         return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         __ticket_t next = lock->tickets.head + 1;
 
         __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
         __ticket_unlock_kick(lock, next);
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
         struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
         return tmp.tail != tmp.head;
 }
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
         struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
         return (__ticket_t)(tmp.tail - tmp.head) > 1;
 }
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-        return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-        return __ticket_spin_is_contended(lock);
-}
 #define arch_spin_is_contended  arch_spin_is_contended
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-        __ticket_spin_lock(lock);
-}
-
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-        return __ticket_spin_trylock(lock);
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-        __ticket_spin_unlock(lock);
-}
-
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
 {
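
To make the head/tail arithmetic in the surviving arch_spin_* bodies concrete, here is a single-threaded user-space model (illustrative only: hypothetical typedef and helper names, plain non-atomic updates instead of the kernel's xadd/cmpxchg, and a field layout that assumes little-endian x86). It also shows why the comment in the first hunk wants the increment in the high part: tail overflow falls off the top of the wide add instead of carrying into head.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t  ticket_t;
typedef uint16_t ticketpair_t;

#define TICKET_SHIFT (8 * sizeof(ticket_t))

union tickets {
        ticketpair_t head_tail;
        struct {
                ticket_t head;  /* low half: ticket currently being served */
                ticket_t tail;  /* high half: next ticket to hand out */
        };
};

/* Take a ticket: one wide add that bumps only the tail. */
static ticket_t take_ticket(union tickets *lock)
{
        ticket_t my_ticket = lock->tail;

        lock->head_tail += (ticketpair_t)1 << TICKET_SHIFT;
        return my_ticket;       /* caller spins until head == my_ticket */
}

/* Release: advance head so the next waiter's ticket comes up. */
static void release(union tickets *lock)
{
        lock->head += 1;
}

static int is_locked(union tickets lock)
{
        return lock.tail != lock.head;          /* someone holds or awaits a ticket */
}

static int is_contended(union tickets lock)
{
        return (ticket_t)(lock.tail - lock.head) > 1;   /* holder plus at least one waiter */
}

int main(void)
{
        union tickets lock = { .head_tail = 0 };

        take_ticket(&lock);     /* CPU A acquires (its ticket 0 equals head) */
        take_ticket(&lock);     /* CPU B queues up behind it with ticket 1 */
        printf("locked=%d contended=%d\n", is_locked(lock), is_contended(lock));

        release(&lock);         /* CPU A unlocks; CPU B's ticket comes up */
        printf("locked=%d contended=%d\n", is_locked(lock), is_contended(lock));
        return 0;
}

The checks mirror the renamed functions above: arch_spin_is_locked compares tail against head, and arch_spin_is_contended reports a gap larger than one ticket.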
