yaml
---
r: 145405
b: refs/heads/master
c: a35197a
h: refs/heads/master
i:
  145403: 03a1f36
v: v3
Russell King authored and Russell King committed May 29, 2009
1 parent 6437cc4 commit 3e4e0e5
Showing 26 changed files with 460 additions and 66 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 67a433ce278b98f47272726a22537fab7fd99de9
refs/heads/master: a35197a8be891072b3654dc7a2285573150dedee
13 changes: 13 additions & 0 deletions trunk/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
.align 3; \
.long 9999b,9001f; \
.previous
/*
* SMP data memory barrier
*/
.macro smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
dmb
#elif __LINUX_ARM_ARCH__ == 6
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
#endif
.endm
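
The macro above is only usable from assembly sources that pull in asm/assembler.h. As a rough, hypothetical sketch (not part of this commit; example_publish and its register usage are invented for illustration), a routine that publishes a value and then orders it on SMP could look like the following; on !CONFIG_SMP or pre-ARMv6 builds the macro expands to nothing:

#include <linux/linkage.h>
#include <asm/assembler.h>

@ hypothetical example: store a datum, then order it against later accesses
ENTRY(example_publish)
	str	r1, [r0]		@ write the value at the address in r0
	smp_dmb				@ dmb on ARMv7, CP15 c7,c10,5 on ARMv6 SMP, nothing otherwise
	mov	pc, lr
ENDPROC(example_publish)

The entry-armv.S hunk later in this commit uses the same macro to implement the __kuser_memory_barrier helper.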
61 changes: 52 additions & 9 deletions trunk/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}

static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
int result;

__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;

smp_mb();

__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");

smp_mb();

return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp;
int result;

__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;

smp_mb();

__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
@@ -77,13 +115,17 @@ static inline int atomic_sub_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");

smp_mb();

return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long oldval, res;

smp_mb();

do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);

smp_mb();

return oldval;
}
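
With this change the value-returning primitives (atomic_add_return(), atomic_sub_return(), atomic_cmpxchg()) carry full smp_mb() semantics, while the new void atomic_add()/atomic_sub() above are barrier-free. A typical caller that relies on the cmpxchg variant is the classic compare-and-swap retry loop; the sketch below is illustrative only (it mirrors the kernel's generic atomic_add_unless() pattern, and example_add_unless is an invented name), assuming <asm/atomic.h>:

static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c != u;
}

Each atomic_cmpxchg() in the loop is now a full barrier, so no explicit smp_mb() is needed around it.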

@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)

return val;
}
#define atomic_add(i, v) (void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)

return val;
}
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_add(i, v) (void) atomic_add_return(i, v)
#define atomic_inc(v) (void) atomic_add_return(1, v)
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
#define atomic_dec(v) (void) atomic_sub_return(1, v)
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()

#include <asm-generic/atomic.h>
#endif
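
Because the plain atomic_inc()/atomic_dec() family is now barrier-free, the smp_mb__before/after_atomic_* macros above must expand to real smp_mb() calls rather than compiler barriers. A hedged sketch of the canonical pairing (the object layout and names are invented; the pattern follows the kernel's Documentation/memory-barriers.txt):

struct example_obj {
	int dead;
	atomic_t ref_count;
};

/* hypothetical sketch: make the "dead" store visible before the reference drop */
static void example_release(struct example_obj *obj)
{
	obj->dead = 1;
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->ref_count);
}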
176 changes: 176 additions & 0 deletions trunk/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif

smp_mb();

switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
smp_mb();

return ret;
}
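
__xchg() is now bracketed by smp_mb() on both sides, so xchg() behaves as a full barrier on SMP. A minimal, hypothetical usage sketch (example_take_pending is an invented name), assuming <asm/system.h>:

/* hypothetical sketch: atomically claim a pending-work word, observing prior stores */
static unsigned long example_take_pending(unsigned long *pending)
{
	return xchg(pending, 0UL);
}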
@@ -316,6 +319,12 @@ extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
#include <asm-generic/cmpxchg.h>
#endif

#else /* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
* cmpxchg only supports 32-bit operands on ARMv6.
*/

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long oldval, res;

switch (size) {
#ifdef CONFIG_CPU_32v6K
case 1:
do {
asm volatile("@ __cmpxchg1\n"
" ldrexb %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexbeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
case 2:
do {
asm volatile("@ __cmpxchg1\n"
" ldrexh %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexheq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
#endif /* CONFIG_CPU_32v6K */
case 4:
do {
asm volatile("@ __cmpxchg4\n"
" ldrex %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" strexeq %0, %4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (old), "r" (new)
: "memory", "cc");
} while (res);
break;
default:
__bad_cmpxchg(ptr, size);
oldval = 0;
}

return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long ret;

smp_mb();
ret = __cmpxchg(ptr, old, new, size);
smp_mb();

return ret;
}

#define cmpxchg(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
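
cmpxchg() returns the value that was actually found at *ptr, so callers usually retry until it matches the value they expected. A hedged illustration (struct example_node and example_push are invented; the pattern is the usual lock-free stack push):

struct example_node {
	struct example_node *next;
};

/* hypothetical sketch: lock-free push onto a singly linked stack */
static void example_push(struct example_node **head, struct example_node *n)
{
	struct example_node *old;

	do {
		old = *head;
		n->next = old;
	} while (cmpxchg(head, old, n) != old);
}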

static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long ret;

switch (size) {
#ifndef CONFIG_CPU_32v6K
case 1:
case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size);
break;
#endif /* !CONFIG_CPU_32v6K */
default:
ret = __cmpxchg(ptr, old, new, size);
}

return ret;
}

#define cmpxchg_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))

#ifdef CONFIG_CPU_32v6K

/*
* Note : ARMv7-M (currently unsupported by Linux) does not support
* ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
* not be allowed to use __cmpxchg64.
*/
static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
register unsigned long long oldval asm("r0");
register unsigned long long __old asm("r2") = old;
register unsigned long long __new asm("r4") = new;
unsigned long res;

do {
asm volatile(
" @ __cmpxchg8\n"
" ldrexd %1, %H1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
" teqeq %H1, %H3\n"
" strexdeq %0, %4, %H4, [%2]\n"
: "=&r" (res), "=&r" (oldval)
: "r" (ptr), "Ir" (__old), "r" (__new)
: "memory", "cc");
} while (res);

return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long ret;

smp_mb();
ret = __cmpxchg64(ptr, old, new);
smp_mb();

return ret;
}

#define cmpxchg64(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)))

#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* CONFIG_CPU_32v6K */

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)
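
On CONFIG_CPU_32v6K parts, cmpxchg64() above provides a 64-bit compare-and-swap with full barriers via ldrexd/strexd. A hedged sketch of a lock-free 64-bit counter built on it (example_inc64 is an invented name):

/* hypothetical sketch: advance a 64-bit counter without a lock (ARMv6K+ only) */
static unsigned long long example_inc64(unsigned long long *ctr)
{
	unsigned long long old, new;

	do {
		old = *ctr;
		new = old + 1ULL;
	} while (cmpxchg64(ctr, old, new) != old);

	return new;
}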
9 changes: 9 additions & 0 deletions trunk/arch/arm/kernel/elf.c
@@ -78,6 +78,15 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
return 1;
if (cpu_architecture() < CPU_ARCH_ARMv6)
return 1;
#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
/*
* If we have support for OABI programs, we can never allow NX
* support - our signal syscall restart mechanism relies upon
* being able to execute code placed on the user stack.
*/
return 1;
#else
return 0;
#endif
}
EXPORT_SYMBOL(arm_elf_read_implies_exec);
5 changes: 1 addition & 4 deletions trunk/arch/arm/kernel/entry-armv.S
@@ -815,10 +815,7 @@ __kuser_helper_start:
*/

__kuser_memory_barrier: @ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
smp_dmb
usr_ret lr

.align 5
Expand Down
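
For context (not part of this diff), user space reaches this helper through its fixed vector-page address, 0xffff0fa0, with a plain void-returning call. A hedged user-space sketch (the typedef and names are invented; only the address comes from the code above):

/* hypothetical user-space sketch: invoke the kuser memory-barrier helper */
typedef void (*kuser_memory_barrier_t)(void);
#define example_kuser_dmb ((kuser_memory_barrier_t)0xffff0fa0)

static void example_full_barrier(void)
{
	example_kuser_dmb();
}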