i386: improve and correct inline asm memory constraints
Use "+m" rather than a combination of "=m" and "m" for improved clarity
and consistency.

This also fixes some inlines that incorrectly didn't tell the compiler
that they read the old value at all, potentially causing the compiler to
generate bogus code.  It appears that all of those potential bugs were
hidden by the use of extra "volatile" specifiers on the data structures
in question, though.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Linus Torvalds committed Jul 8, 2006
1 parent e2a3d40 commit b862f3b
Showing 8 changed files with 57 additions and 62 deletions.
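As context for the hunks that follow: a minimal sketch of the constraint change this commit applies throughout, assuming a hypothetical my_atomic_t and two illustrative helpers (this is not kernel code):

/* Old style: a "=m" output plus a duplicate "m" input on the same location
 * to say "this asm also reads the old value". */
typedef struct { int counter; } my_atomic_t;    /* hypothetical stand-in */

static inline void add_old(int i, my_atomic_t *v)
{
        __asm__ __volatile__("lock; addl %1,%0"
                             : "=m" (v->counter)
                             : "ir" (i), "m" (v->counter));
}

/* New style: a single "+m" read-write operand states the same contract
 * directly, with no chance of the two operands being given different
 * addresses. */
static inline void add_new(int i, my_atomic_t *v)
{
        __asm__ __volatile__("lock; addl %1,%0"
                             : "+m" (v->counter)
                             : "ir" (i));
}

Here "lock; addl" stands in for LOCK_PREFIX "addl"; LOCK_PREFIX expands to the lock prefix only on SMP builds.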
30 changes: 14 additions & 16 deletions include/asm-i386/atomic.h
@@ -46,8 +46,8 @@ static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
:"+m" (v->counter)
:"ir" (i));
}

/**
@@ -61,8 +61,8 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
:"+m" (v->counter)
:"ir" (i));
}

/**
@@ -80,8 +80,8 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)

__asm__ __volatile__(
LOCK_PREFIX "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
:"+m" (v->counter), "=qm" (c)
:"ir" (i) : "memory");
return c;
}

@@ -95,8 +95,7 @@ static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
:"+m" (v->counter));
}

/**
@@ -109,8 +108,7 @@ static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
:"+m" (v->counter));
}

/**
@@ -127,8 +125,8 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)

__asm__ __volatile__(
LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
:"+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}

@@ -146,8 +144,8 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)

__asm__ __volatile__(
LOCK_PREFIX "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
:"+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}

@@ -166,8 +164,8 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)

__asm__ __volatile__(
LOCK_PREFIX "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
:"+m" (v->counter), "=qm" (c)
:"ir" (i) : "memory");
return c;
}
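The atomic.h changes above are pure cleanup, since these sites already paired "=m" with a duplicate "m" input. Other sites in this patch (for example the __FD_SET/__FD_CLR macros in posix_types.h below) declared only a write-only "=m" with no matching "m" input even though the instruction reads the old value; that is the "bogus code" risk the commit message mentions. A sketch of why it matters, using hypothetical broken_inc/fixed_inc helpers rather than kernel code:

/* A write-only "=m" operand tells GCC the asm never reads *p, so the
 * store preceding it is dead and may legally be deleted or reordered. */
static inline void broken_inc(int *p)
{
        __asm__ __volatile__("incl %0" : "=m" (*p));
}

void example(int *p)
{
        *p = 41;        /* looks dead: *p appears to be purely overwritten */
        broken_inc(p);  /* actually increments the old value of *p */
}

/* The fix: "+m" declares the read-modify-write honestly. */
static inline void fixed_inc(int *p)
{
        __asm__ __volatile__("incl %0" : "+m" (*p));
}

As the commit message notes, the potential miscompilations were masked in practice because the affected structures mostly declare their fields volatile, which already forbids eliminating the store.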

10 changes: 5 additions & 5 deletions include/asm-i386/futex.h
@@ -20,8 +20,8 @@
.align 8\n\
.long 1b,3b\n\
.previous" \
: "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
: "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
: "i" (-EFAULT), "0" (oparg), "1" (0))

#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
@@ -38,9 +38,9 @@
.align 8\n\
.long 1b,4b,2b,4b\n\
.previous" \
: "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
"=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
: "r" (oparg), "i" (-EFAULT), "1" (0))

static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -123,7 +123,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
" .long 1b,3b \n"
" .previous \n"

: "=a" (oldval), "=m" (*uaddr)
: "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "0" (oldval)
: "memory"
);
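The cmpxchg above keeps "=a" for the value the instruction returns and now uses "+m" for the user word it compares and conditionally rewrites. A stripped-down sketch of the same constraint shape without the fault-handling fixup (cmpxchg_int is a hypothetical helper, not the kernel's futex code):

static inline int cmpxchg_int(int *ptr, int oldval, int newval)
{
        /* %0 = oldval in %eax, %1 = *ptr, %2 = newval */
        __asm__ __volatile__("lock; cmpxchgl %2,%1"
                             : "=a" (oldval), "+m" (*ptr)
                             : "r" (newval), "0" (oldval)
                             : "memory");
        return oldval;  /* the value cmpxchg observed at *ptr */
}

The "0" (oldval) matching constraint preloads %eax with the expected value before the instruction runs, exactly as the futex code's "0" and "1" operands do.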
14 changes: 6 additions & 8 deletions include/asm-i386/local.h
@@ -17,32 +17,30 @@ static __inline__ void local_inc(local_t *v)
{
__asm__ __volatile__(
"incl %0"
:"=m" (v->counter)
:"m" (v->counter));
:"+m" (v->counter));
}

static __inline__ void local_dec(local_t *v)
{
__asm__ __volatile__(
"decl %0"
:"=m" (v->counter)
:"m" (v->counter));
:"+m" (v->counter));
}

static __inline__ void local_add(long i, local_t *v)
{
__asm__ __volatile__(
"addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
:"+m" (v->counter)
:"ir" (i));
}

static __inline__ void local_sub(long i, local_t *v)
{
__asm__ __volatile__(
"subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
:"+m" (v->counter)
:"ir" (i));
}

/* On x86, these are no better than the atomic variants. */
4 changes: 2 additions & 2 deletions include/asm-i386/posix_types.h
@@ -51,12 +51,12 @@ typedef struct {
#undef __FD_SET
#define __FD_SET(fd,fdsetp) \
__asm__ __volatile__("btsl %1,%0": \
"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))

#undef __FD_CLR
#define __FD_CLR(fd,fdsetp) \
__asm__ __volatile__("btrl %1,%0": \
"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
"+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))

#undef __FD_ISSET
#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
4 changes: 2 additions & 2 deletions include/asm-i386/rwlock.h
@@ -37,7 +37,7 @@
"popl %%eax\n\t" \
"1:\n", \
"subl $1,%0\n\t", \
"=m" (*(volatile int *)rw) : : "memory")
"+m" (*(volatile int *)rw) : : "memory")

#define __build_read_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
@@ -63,7 +63,7 @@
"popl %%eax\n\t" \
"1:\n", \
"subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
"=m" (*(volatile int *)rw) : : "memory")
"+m" (*(volatile int *)rw) : : "memory")

#define __build_write_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
35 changes: 17 additions & 18 deletions include/asm-i386/rwsem.h
@@ -111,8 +111,8 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
" jmp 1b\n"
LOCK_SECTION_END
"# ending down_read\n\t"
: "=m"(sem->count)
: "a"(sem), "m"(sem->count)
: "+m" (sem->count)
: "a" (sem)
: "memory", "cc");
}

@@ -133,8 +133,8 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
: "i"(RWSEM_ACTIVE_READ_BIAS)
: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
: "i" (RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
return result>=0 ? 1 : 0;
}
@@ -161,8 +161,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
" jmp 1b\n"
LOCK_SECTION_END
"# ending down_write"
: "=m"(sem->count), "=d"(tmp)
: "a"(sem), "1"(tmp), "m"(sem->count)
: "+m" (sem->count), "=d" (tmp)
: "a" (sem), "1" (tmp)
: "memory", "cc");
}

@@ -205,8 +205,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu
" jmp 1b\n"
LOCK_SECTION_END
"# ending __up_read\n"
: "=m"(sem->count), "=d"(tmp)
: "a"(sem), "1"(tmp), "m"(sem->count)
: "+m" (sem->count), "=d" (tmp)
: "a" (sem), "1" (tmp)
: "memory", "cc");
}

@@ -231,8 +231,8 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
" jmp 1b\n"
LOCK_SECTION_END
"# ending __up_write\n"
: "=m"(sem->count)
: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
: "+m" (sem->count)
: "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc", "edx");
}

@@ -256,8 +256,8 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001
" jmp 1b\n"
LOCK_SECTION_END
"# ending __downgrade_write\n"
: "=m"(sem->count)
: "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
: "+m" (sem->count)
: "a" (sem), "i" (-RWSEM_WAITING_BIAS)
: "memory", "cc");
}

@@ -268,8 +268,8 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
__asm__ __volatile__(
LOCK_PREFIX "addl %1,%0"
: "=m"(sem->count)
: "ir"(delta), "m"(sem->count));
: "+m" (sem->count)
: "ir" (delta));
}

/*
@@ -280,10 +280,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
int tmp = delta;

__asm__ __volatile__(
LOCK_PREFIX "xadd %0,(%2)"
: "+r"(tmp), "=m"(sem->count)
: "r"(sem), "m"(sem->count)
: "memory");
LOCK_PREFIX "xadd %0,%1"
: "+r" (tmp), "+m" (sem->count)
: : "memory");

return tmp+delta;
}
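The rwsem_atomic_update rewrite does a little more than the constraint swap: by making sem->count a "+m" operand instead of passing sem in a register and spelling "(%2)" by hand, it lets GCC choose the addressing mode itself. A standalone sketch of the new form, assuming a plain int counter (fetch_and_add is a hypothetical name, not kernel code):

static inline int fetch_and_add(int *count, int delta)
{
        int tmp = delta;
        /* xadd exchanges tmp with *count, then stores the sum in *count,
         * so tmp ends up holding the pre-addition value. */
        __asm__ __volatile__("lock; xadd %0,%1"
                             : "+r" (tmp), "+m" (*count)
                             : : "memory");
        return tmp;     /* value of *count before the addition */
}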
8 changes: 4 additions & 4 deletions include/asm-i386/semaphore.h
@@ -107,7 +107,7 @@ static inline void down(struct semaphore * sem)
"call __down_failed\n\t"
"jmp 1b\n"
LOCK_SECTION_END
:"=m" (sem->count)
:"+m" (sem->count)
:
:"memory","ax");
}
@@ -132,7 +132,7 @@ static inline int down_interruptible(struct semaphore * sem)
"call __down_failed_interruptible\n\t"
"jmp 1b\n"
LOCK_SECTION_END
:"=a" (result), "=m" (sem->count)
:"=a" (result), "+m" (sem->count)
:
:"memory");
return result;
@@ -157,7 +157,7 @@ static inline int down_trylock(struct semaphore * sem)
"call __down_failed_trylock\n\t"
"jmp 1b\n"
LOCK_SECTION_END
:"=a" (result), "=m" (sem->count)
:"=a" (result), "+m" (sem->count)
:
:"memory");
return result;
@@ -182,7 +182,7 @@ static inline void up(struct semaphore * sem)
"jmp 1b\n"
LOCK_SECTION_END
".subsection 0\n"
:"=m" (sem->count)
:"+m" (sem->count)
:
:"memory","ax");
}
14 changes: 7 additions & 7 deletions include/asm-i386/spinlock.h
@@ -65,7 +65,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
alternative_smp(
__raw_spin_lock_string,
__raw_spin_lock_string_up,
"=m" (lock->slock) : : "memory");
"+m" (lock->slock) : : "memory");
}

/*
@@ -79,7 +79,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
alternative_smp(
__raw_spin_lock_string_flags,
__raw_spin_lock_string_up,
"=m" (lock->slock) : "r" (flags) : "memory");
"+m" (lock->slock) : "r" (flags) : "memory");
}
#endif

@@ -88,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
char oldval;
__asm__ __volatile__(
"xchgb %b0,%1"
:"=q" (oldval), "=m" (lock->slock)
:"=q" (oldval), "+m" (lock->slock)
:"0" (0) : "memory");
return oldval > 0;
}
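__raw_spin_trylock is a read-modify-write even though only the old value is returned: xchgb both reads and stores the lock byte, so "+m" is the honest constraint. A sketch of the same pattern on a bare byte flag (byte_trylock and the 1-means-free encoding mirror the i386 slock convention but are illustrative only, not kernel code):

static inline int byte_trylock(volatile char *slock)
{
        char oldval;
        /* Atomically store 0 (locked) and fetch the previous value. */
        __asm__ __volatile__("xchgb %b0,%1"
                             : "=q" (oldval), "+m" (*slock)
                             : "0" (0)
                             : "memory");
        return oldval > 0;      /* succeeded iff the byte held 1 (free) */
}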
@@ -104,7 +104,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)

#define __raw_spin_unlock_string \
"movb $1,%0" \
:"=m" (lock->slock) : : "memory"
:"+m" (lock->slock) : : "memory"


static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)

#define __raw_spin_unlock_string \
"xchgb %b0, %1" \
:"=q" (oldval), "=m" (lock->slock) \
:"=q" (oldval), "+m" (lock->slock) \
:"0" (oldval) : "memory"

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -199,13 +199,13 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
: "=m" (rw->lock) : : "memory");
: "+m" (rw->lock) : : "memory");
}

#endif /* __ASM_SPINLOCK_H */
