x86: consolidate spinlock.h
The cli and sti instructions need to be replaced by paravirt hooks.
For the i386 architecture, this is already done. The code requirements
aren't much different from the x86_64 point of view, so this part is
consolidated into the common header.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Glauber de Oliveira Costa authored and Ingo Molnar committed Jan 30, 2008
1 parent 6abcd98 commit 2fed0c5
Showing 3 changed files with 64 additions and 58 deletions.
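
Before the diff, a brief orientation on the pattern being consolidated: the lock code never hard-codes cli/sti. Its inline assembly is built from the CLI_STRING/STI_STRING macros (plus CLI_STI_INPUT_ARGS and CLI_STI_CLOBBERS), which stay the literal instructions on native kernels and are redefined by <asm/paravirt.h> when CONFIG_PARAVIRT is enabled. The sketch below is illustrative only: the macro block mirrors the new spinlock.h hunk, while the demo function and its name are hypothetical, and the kernel types it leans on (raw_spinlock_t, LOCK_PREFIX) are assumed from the usual x86 headers.

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>	/* supplies CLI_STRING, STI_STRING, clobbers */
#else
#define CLI_STRING	"cli"
#define STI_STRING	"sti"
#define CLI_STI_CLOBBERS	/* nothing extra to clobber on native */
#define CLI_STI_INPUT_ARGS	/* no extra asm inputs on native */
#endif

/* Hypothetical demo (not part of the patch): take a byte lock, letting
 * interrupts back in while spinning if the caller's saved flags say
 * they were enabled (EFLAGS.IF is bit 9, i.e. 0x200).
 */
static inline void demo_spin_lock_irqsave(raw_spinlock_t *lock,
					  unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decb %[slock]\n\t"	/* try to grab the lock */
		"jns 3f\n\t"				/* non-negative: got it */
		"testl $0x200, %[flags]\n\t"		/* were interrupts on? */
		"jz 2f\n\t"
		STI_STRING "\n"				/* re-enable while waiting */
		"2:\t"
		"rep;nop\n\t"				/* friendly spin */
		"cmpb $0, %[slock]\n\t"
		"jle 2b\n\t"
		CLI_STRING "\n\t"			/* close the window, retry */
		"jmp 1b\n"
		"3:\n\t"
		: [slock] "+m" (lock->slock)
		: [flags] "r" (flags) CLI_STI_INPUT_ARGS
		: "memory" CLI_STI_CLOBBERS);
}

The real __raw_spin_lock_flags() in the patch adds a separate spin path so it never issues the CLI/STI fragments when the caller already had interrupts disabled; the sketch keeps one path for brevity.
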
14 changes: 14 additions & 0 deletions include/asm-x86/spinlock.h
@@ -1,5 +1,19 @@
#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define CLI_STRING "cli"
#define STI_STRING "sti"
#define CLI_STI_CLOBBERS
#define CLI_STI_INPUT_ARGS
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_X86_32
# include "spinlock_32.h"
#else
# include "spinlock_64.h"
#endif

#endif
71 changes: 26 additions & 45 deletions include/asm-x86/spinlock_32.h
@@ -5,16 +5,6 @@
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define CLI_STRING "cli"
#define STI_STRING "sti"
#define CLI_STI_CLOBBERS
#define CLI_STI_INPUT_ARGS
#endif /* CONFIG_PARAVIRT */

/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,23 +17,24 @@
* (the type definitions are in asm/spinlock_types.h)
*/

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
return *(volatile signed char *)(&(lock)->slock) <= 0;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
asm volatile("\n1:\t"
LOCK_PREFIX " ; decb %0\n\t"
"jns 3f\n"
"2:\t"
"rep;nop\n\t"
"cmpb $0,%0\n\t"
"jle 2b\n\t"
"jmp 1b\n"
"3:\n\t"
: "+m" (lock->slock) : : "memory");
asm volatile(
"\n1:\t"
LOCK_PREFIX " ; decb %0\n\t"
"jns 3f\n"
"2:\t"
"rep;nop\n\t"
"cmpb $0,%0\n\t"
"jle 2b\n\t"
"jmp 1b\n"
"3:\n\t"
: "+m" (lock->slock) : : "memory");
}

/*
@@ -55,7 +46,8 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
* irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
*/
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
unsigned long flags)
{
asm volatile(
"\n1:\t"
@@ -79,18 +71,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
"5:\n\t"
: [slock] "+m" (lock->slock)
: [flags] "r" (flags)
CLI_STI_INPUT_ARGS
CLI_STI_INPUT_ARGS
: "memory" CLI_STI_CLOBBERS);
}
#endif

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
char oldval;
signed char oldval;

asm volatile(
"xchgb %b0,%1"
:"=q" (oldval), "+m" (lock->slock)
:"0" (0) : "memory");

return oldval > 0;
}

@@ -112,7 +106,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
char oldval = 1;
unsigned char oldval = 1;

asm volatile("xchgb %b0, %1"
: "=q" (oldval), "+m" (lock->slock)
@@ -139,31 +133,16 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
*
* On x86, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
*
* The inline assembly is non-obvious. Think about it.
*
* Changed to use the same technique as rw semaphores. See
* semaphore.h for details. -ben
*
* the helpers are in arch/i386/kernel/semaphore.c
*/

/**
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
static inline int __raw_read_can_lock(raw_rwlock_t *x)
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
return (int)(x)->lock > 0;
return (int)(lock)->lock > 0;
}

/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
static inline int __raw_write_can_lock(raw_rwlock_t *x)
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
return (x)->lock == RW_LOCK_BIAS;
return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
@@ -187,6 +166,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;

atomic_dec(count);
if (atomic_read(count) >= 0)
return 1;
@@ -197,6 +177,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;

if (atomic_sub_and_test(RW_LOCK_BIAS, count))
return 1;
atomic_add(RW_LOCK_BIAS, count);
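
A note on the rwlock comment above: the lock word starts at RW_LOCK_BIAS (0x01000000 in <asm-x86/rwlock.h>), each reader subtracts 1 and a writer subtracts the whole bias, so the counter stays positive only while no writer is present and equals the full bias only while the lock is completely idle. The sketch below restates the two trylock paths with that arithmetic spelled out; the bodies follow the mainline code of this era (the tails cut off by the truncated hunks above are filled in from it), and the demo_ names mark it as illustration rather than part of the patch.

/* Reader/writer accounting on the RW_LOCK_BIAS counter (sketch, not the
 * patch itself): readers cost 1, a writer costs the whole bias.
 */
static inline int demo_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);			/* claim a reader slot */
	if (atomic_read(count) >= 0)
		return 1;			/* no writer holds the lock */
	atomic_inc(count);			/* a writer is in: back out */
	return 0;
}

static inline int demo_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;			/* counter hit 0: lock was idle */
	atomic_add(RW_LOCK_BIAS, count);	/* readers or writer active: undo */
	return 0;
}
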
37 changes: 24 additions & 13 deletions include/asm-x86/spinlock_64.h
@@ -33,35 +33,44 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
"cmpl $0,%0\n\t"
"jle 3b\n\t"
"jmp 1b\n"
"2:\t" : "=m" (lock->slock) : : "memory");
"2:\t"
: "=m" (lock->slock) : : "memory");
}

/*
* Same as __raw_spin_lock, but reenable interrupts during spinning.
* It is easier for the lock validator if interrupts are not re-enabled
* in the middle of a lock-acquire. This is a performance feature anyway
* so we turn it off:
*
* NOTE: there's an irqs-on section here, which normally would have to be
* irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
*/
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
unsigned long flags)
{
asm volatile(
"\n1:\t"
LOCK_PREFIX " ; decl %0\n\t"
"jns 5f\n"
"testl $0x200, %1\n\t" /* interrupts were disabled? */
"jz 4f\n\t"
"sti\n"
STI_STRING "\n"
"3:\t"
"rep;nop\n\t"
"cmpl $0, %0\n\t"
"jle 3b\n\t"
"cli\n\t"
CLI_STRING "\n\t"
"jmp 1b\n"
"4:\t"
"rep;nop\n\t"
"cmpl $0, %0\n\t"
"jg 1b\n\t"
"jmp 4b\n"
"5:\n\t"
: "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
: "+m" (lock->slock)
: "r" ((unsigned)flags) CLI_STI_INPUT_ARGS
: "memory" CLI_STI_CLOBBERS);
}
#endif

@@ -79,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
asm volatile("movl $1,%0" : "=m" (lock->slock) :: "memory");
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
@@ -114,25 +123,26 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
"call __read_lock_failed\n"
"call __read_lock_failed\n\t"
"1:\n"
::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
"\tcall __write_lock_failed\n\t"
"call __write_lock_failed\n\t"
"1:\n"
::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;

atomic_dec(count);
if (atomic_read(count) >= 0)
return 1;
@@ -143,6 +153,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;

if (atomic_sub_and_test(RW_LOCK_BIAS, count))
return 1;
atomic_add(RW_LOCK_BIAS, count);
@@ -151,12 +162,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",%0"
: "=m" (rw->lock) : : "memory");
}

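
Finally, for readers who prefer C to AT&T assembly, here is a rough rendering of the control flow that __raw_spin_lock_flags() above implements. It is illustrative only: the real routine has to stay in inline assembly so the interrupt instructions can be paravirt-patched, the decrement shown here is not atomic (the real code uses LOCK_PREFIX decl), and the kernel helpers and the raw_spinlock_t layout are assumed.

/* Rough C-level equivalent of the 64-bit __raw_spin_lock_flags() control
 * flow -- illustrative only, not a drop-in replacement.
 */
static inline void spin_lock_flags_in_c(raw_spinlock_t *lock,
					unsigned long flags)
{
	for (;;) {
		/* LOCK_PREFIX "decl %0; jns 5f" -- atomic in the real code */
		if ((int)--lock->slock >= 0)
			return;				/* acquired */

		if (flags & 0x200)			/* EFLAGS.IF: were irqs on? */
			local_irq_enable();		/* STI_STRING */

		while ((int)lock->slock <= 0)		/* "cmpl $0; jle" */
			cpu_relax();			/* "rep;nop" */

		if (flags & 0x200)
			local_irq_disable();		/* CLI_STRING before retrying */
	}
}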
