Commit ae4f14a

---
r: 145434
b: refs/heads/master
c: c4e51e4
h: refs/heads/master
v: v3
Linus Torvalds committed Jun 1, 2009
1 parent 6ba8bbf commit ae4f14a
Showing 43 changed files with 617 additions and 178 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d280cc989ad591607e812cd5c5dfde702b5f191a
+refs/heads/master: c4e51e465796e60b8416d05572bee57a25e000ae
4 changes: 3 additions & 1 deletion trunk/MAINTAINERS
@@ -624,6 +624,7 @@ M: paulius.zaleckas@teltonika.lt
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
T: git git://gitorious.org/linux-gemini/mainline.git
S: Maintained
F: arch/arm/mach-gemini/

ARM/EBSA110 MACHINE SUPPORT
P: Russell King
@@ -650,6 +651,7 @@ P: Paulius Zaleckas
M: paulius.zaleckas@teltonika.lt
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
F: arch/arm/mm/*-fa*

ARM/FOOTBRIDGE ARCHITECTURE
P: Russell King
@@ -2249,7 +2251,7 @@ P: Li Yang
M: leoli@freescale.com
P: Zhang Wei
M: zw@zh-kernel.org
-L: linuxppc-embedded@ozlabs.org
+L: linuxppc-dev@ozlabs.org
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/dma/fsldma.*
13 changes: 13 additions & 0 deletions trunk/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
.align 3; \
.long 9999b,9001f; \
.previous
/*
* SMP data memory barrier
*/
.macro smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
dmb
#elif __LINUX_ARM_ARCH__ == 6
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
#endif
.endm
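
The smp_dmb macro above gives assembly code a data memory barrier that exists only on SMP kernels: ARMv7 has a dedicated dmb instruction, ARMv6 issues the barrier through a CP15 write, and earlier architectures get nothing. A rough C-level analogue of the same selection, as an illustrative sketch assuming a GCC-style ARM compiler that defines __ARM_ARCH (not kernel code):

/* Editorial sketch: mirrors the smp_dmb selection above, assuming a
 * GCC-style ARM compiler that defines __ARM_ARCH. */
static inline void smp_dmb_sketch(void)
{
#if defined(__ARM_ARCH) && __ARM_ARCH >= 7
	__asm__ __volatile__("dmb" : : : "memory");	/* ARMv7+: dedicated instruction */
#elif defined(__ARM_ARCH) && __ARM_ARCH == 6
	/* ARMv6: the data memory barrier is a CP15 c7/c10/5 write */
	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#else
	__asm__ __volatile__("" : : : "memory");	/* pre-v6: compiler barrier only in this sketch */
#endif
}
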
61 changes: 52 additions & 9 deletions trunk/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}

static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
int result;

__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;

smp_mb();

__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");

smp_mb();

return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp;
int result;

__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;

smp_mb();

__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
@@ -77,13 +115,17 @@ static inline int atomic_sub_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");

smp_mb();

return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long oldval, res;

smp_mb();

do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);

smp_mb();

return oldval;
}
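
The split above is deliberate: the new atomic_add()/atomic_sub() only guarantee atomicity (an ldrex/strex retry loop with no barriers), while atomic_add_return(), atomic_sub_return() and atomic_cmpxchg() are bracketed by smp_mb() and therefore order memory accesses on both sides. A userspace sketch of that contract, with GCC/Clang __atomic builtins standing in for the kernel helpers (illustrative names, not kernel symbols):

#include <stdio.h>

static int counter;

/* Like the new atomic_add(): atomic, but imposes no memory ordering. */
static void sketch_atomic_add(int i)
{
	__atomic_fetch_add(&counter, i, __ATOMIC_RELAXED);
}

/* Like atomic_add_return(): a full barrier before and after the update. */
static int sketch_atomic_add_return(int i)
{
	return __atomic_add_fetch(&counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	sketch_atomic_add(1);
	printf("%d\n", sketch_atomic_add_return(2));	/* prints 3 */
	return 0;
}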

@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)

return val;
}
#define atomic_add(i, v) (void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)

return val;
}
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-#define atomic_inc(v) (void) atomic_add_return(1, v)
-#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
-#define atomic_dec(v) (void) atomic_sub_return(1, v)
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()

#include <asm-generic/atomic.h>
#endif
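
With plain atomic_inc()/atomic_dec() no longer implying any ordering, smp_mb__before_atomic_dec() and its siblings must be real barriers instead of compiler-only barrier()s. A C11 userspace sketch of the pattern they exist for (a hypothetical ref-count example, not code from this patch):

#include <stdatomic.h>

struct obj {
	atomic_int refcnt;
	int data;
};

/* The store to o->data must be visible before the reference drop; a
 * relaxed decrement (like ARM's unordered atomic_dec()) needs an
 * explicit fence, which smp_mb__before_atomic_dec() now provides. */
static void put_obj_sketch(struct obj *o)
{
	o->data = 0;
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__before_atomic_dec() */
	atomic_fetch_sub_explicit(&o->refcnt, 1, memory_order_relaxed);
}
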
176 changes: 176 additions & 0 deletions trunk/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif

smp_mb();

switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
smp_mb();

return ret;
}
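
xchg() is likewise now a full barrier: an smp_mb() is issued before the swap and another after it. That is exactly the ordering a classic test-and-set lock needs, as in this userspace sketch with C11 atomics standing in for __xchg (illustrative only):

#include <stdatomic.h>

static atomic_int lock_word;

/* like: while (xchg(&lock_word, 1)) ; -- the swap's full ordering keeps
 * the critical section from leaking outside the lock */
static void spin_lock_sketch(void)
{
	while (atomic_exchange_explicit(&lock_word, 1, memory_order_seq_cst))
		;	/* spin until the previous value was 0 */
}

static void spin_unlock_sketch(void)
{
	atomic_store_explicit(&lock_word, 0, memory_order_seq_cst);
}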
@@ -316,6 +319,12 @@ extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
#include <asm-generic/cmpxchg.h>
#endif

#else /* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
* cmpxchg only supports 32-bit operands on ARMv6.
*/

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long oldval, res;

switch (size) {
#ifdef CONFIG_CPU_32v6K
case 1:
do {
asm volatile("@ __cmpxchg1\n"
" ldrexb %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexbeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
case 2:
do {
asm volatile("@ __cmpxchg1\n"
" ldrexh %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexheq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
#endif /* CONFIG_CPU_32v6K */
case 4:
do {
asm volatile("@ __cmpxchg4\n"
" ldrex %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
default:
__bad_cmpxchg(ptr, size);
oldval = 0;
}

return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long ret;

smp_mb();
ret = __cmpxchg(ptr, old, new, size);
smp_mb();

return ret;
}

#define cmpxchg(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
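
cmpxchg() funnels every supported operand size through unsigned long, and __cmpxchg_mb() supplies the full-barrier semantics callers expect. Callers typically wrap it in a retry loop; a userspace sketch of that shape, with C11 atomics as a stand-in and a hypothetical add_unless_zero() helper:

#include <stdatomic.h>

static atomic_int value;

/* Atomically add 'i' only while the value is non-zero: the standard
 * cmpxchg() retry shape. */
static int add_unless_zero_sketch(int i)
{
	int old = atomic_load_explicit(&value, memory_order_relaxed);

	while (old != 0) {
		/* like: if (cmpxchg(&value, old, old + i) == old) ... */
		if (atomic_compare_exchange_weak(&value, &old, old + i))
			return 1;	/* swap succeeded */
		/* on failure, 'old' now holds the fresh value; retry */
	}
	return 0;
}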

static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long ret;

switch (size) {
#ifndef CONFIG_CPU_32v6K
case 1:
case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size);
break;
#endif /* !CONFIG_CPU_32v6K */
default:
ret = __cmpxchg(ptr, old, new, size);
}

return ret;
}

#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
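
cmpxchg_local() differs in two ways: it issues no barriers, and on cores without ldrexb/ldrexh (pre-6K) the sub-word sizes fall back to the generic local implementation. It is meant for data that only the current CPU races on, so relaxed ordering suffices, as this illustrative C11 sketch suggests:

#include <stdatomic.h>

/* A counter only ever touched by one CPU (e.g. task vs. interrupt on
 * the same core): atomicity is needed, cross-CPU ordering is not, so
 * memory_order_relaxed mirrors the missing smp_mb() in __cmpxchg_local(). */
static atomic_int percpu_counter;

static int local_inc_sketch(void)
{
	int old = atomic_load_explicit(&percpu_counter, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(&percpu_counter, &old,
			old + 1, memory_order_relaxed, memory_order_relaxed))
		;	/* 'old' reloaded on failure; retry */
	return old + 1;
}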

#ifdef CONFIG_CPU_32v6K

/*
* Note: ARMv7-M (currently unsupported by Linux) does not support
* ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
* not be allowed to use __cmpxchg64.
*/
static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
register unsigned long long oldval asm("r0");
register unsigned long long __old asm("r2") = old;
register unsigned long long __new asm("r4") = new;
unsigned long res;

do {
asm volatile(
" @ __cmpxchg8\n"
" ldrexd %1, %H1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" teqeq %H1, %H3\n"
" strexdeq %0, %4, %H4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (__old), "r" (__new)
: "memory", "cc");
} while (res);

return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long ret;

smp_mb();
ret = __cmpxchg64(ptr, old, new);
smp_mb();

return ret;
}

#define cmpxchg64(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))
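
ldrexd/strexd require an even/odd register pair, which is why __old and __new are pinned to r2 and r4 (and oldval to r0) with explicit register asm. cmpxchg64() wraps the primitive in barriers; cmpxchg64_local() omits them. A userspace sketch of the cmpxchg64() contract (C11 atomics, illustrative names):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t wide;

/* Full-barrier 64-bit compare-and-swap returning the prior value,
 * like cmpxchg64() above. */
static uint64_t cmpxchg64_sketch(uint64_t old, uint64_t new)
{
	uint64_t expected = old;

	atomic_compare_exchange_strong_explicit(&wide, &expected, new,
			memory_order_seq_cst, memory_order_seq_cst);
	return expected;	/* equals 'old' iff the swap happened */
}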

#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* CONFIG_CPU_32v6K */

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)