Merge branch 'v6v7' into devel
Conflicts:
	arch/arm/include/asm/cacheflush.h
	arch/arm/include/asm/proc-fns.h
	arch/arm/mm/Kconfig
Russell King committed Mar 16, 2011
2 parents 1f0090a + 3ba6e69 commit bd1274d
Showing 28 changed files with 191 additions and 189 deletions.
14 changes: 8 additions & 6 deletions arch/arm/Kconfig
@@ -7,7 +7,7 @@ config ARM
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
-select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
+select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
@@ -24,7 +24,7 @@ config ARM
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_REGS_AND_STACK_ACCESS_API
-select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
+select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
@@ -456,6 +456,7 @@ config ARCH_IXP4XX

config ARCH_DOVE
bool "Marvell Dove"
+select CPU_V6K
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
@@ -1059,7 +1060,7 @@ config XSCALE_PMU
default y

config CPU_HAS_PMU
-depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
+depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
(!ARCH_OMAP3 || OMAP3_EMU)
default y
bool
@@ -1075,7 +1076,7 @@ endif

config ARM_ERRATA_411920
bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
-depends on CPU_V6
+depends on CPU_V6 || CPU_V6K
help
Invalidation of the Instruction Cache operation can
fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1318,6 +1319,7 @@ source "kernel/time/Kconfig"
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
depends on EXPERIMENTAL
+depends on CPU_V6K || CPU_V7
depends on GENERIC_CLOCKEVENTS
depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
@@ -1429,7 +1431,7 @@ config HZ

config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
-depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
+depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
select AEABI
select ARM_ASM_UNIFIED
help
@@ -1963,7 +1965,7 @@ config FPE_FASTFPE

config VFP
bool "VFP-format floating point maths"
-depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
+depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
help
Say Y to include VFP support code in the kernel. This is needed
if your hardware includes a VFP unit.
1 change: 1 addition & 0 deletions arch/arm/Makefile
@@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110)
tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)

ifeq ($(CONFIG_AEABI),y)
CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork
2 changes: 1 addition & 1 deletion arch/arm/boot/compressed/head.S
@@ -21,7 +21,7 @@

#if defined(CONFIG_DEBUG_ICEDCC)

-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
.macro loadsp, rb, tmp
.endm
.macro writeb, ch, rb
2 changes: 1 addition & 1 deletion arch/arm/boot/compressed/misc.c
@@ -36,7 +36,7 @@ extern void error(char *x);

#ifdef CONFIG_DEBUG_ICEDCC

-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

static void icedcc_putc(int ch)
{
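
For context, icedcc_putc() is the decompressor's debug console output over the DCC (debug communications channel). On ARMv6-class cores it spins until the CP14 DSCR register reports the write channel free, then writes the character. A sketch of roughly what the v6/v6K variant looks like (reconstructed from memory, so treat the register and bit details as an approximation rather than a verbatim copy of the file):

static void icedcc_putc(int ch)
{
	int status, i = 0x4000000;	/* bounded spin so a missing debugger can't hang us */

	do {
		if (--i < 0)
			return;
		/* read CP14 DSCR; bit 29 is believed to be the "write register full" flag */
		asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (status));
	} while (status & (1 << 29));

	/* write the character to the DCC data register */
	asm volatile ("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch));
}
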
60 changes: 22 additions & 38 deletions arch/arm/include/asm/bitops.h
@@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
* Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
*/

+/*
+* Native endian assembly bitops. nr = 0 -> word 0 bit 0.
+*/
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern void _set_bit_le(int nr, volatile unsigned long * p);
-extern void _clear_bit_le(int nr, volatile unsigned long * p);
-extern void _change_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_le(const void * p, unsigned size);
extern int _find_next_zero_bit_le(const void * p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern void _set_bit_be(int nr, volatile unsigned long * p);
-extern void _clear_bit_be(int nr, volatile unsigned long * p);
-extern void _change_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_be(const void * p, unsigned size);
extern int _find_next_zero_bit_be(const void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
/*
* The __* form of bitops are non-atomic and may be reordered.
*/
-#define ATOMIC_BITOP_LE(name,nr,p) \
-(__builtin_constant_p(nr) ? \
-____atomic_##name(nr, p) : \
-_##name##_le(nr,p))
-
-#define ATOMIC_BITOP_BE(name,nr,p) \
-(__builtin_constant_p(nr) ? \
-____atomic_##name(nr, p) : \
-_##name##_be(nr,p))
+#define ATOMIC_BITOP(name,nr,p) \
+(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
#else
-#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p)
-#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p)
+#define ATOMIC_BITOP(name,nr,p) _##name(nr,p)
#endif

-#define NONATOMIC_BITOP(name,nr,p) \
-(____nonatomic_##name(nr, p))
+/*
+* Native endian atomic definitions.
+*/
+#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p)

#ifndef __ARMEB__
-/*
-* These are the little endian, atomic definitions.
-*/
-#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p)
-#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p)
-#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p)
-#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define WORD_BITOFF_TO_LE(x) ((x))

#else

-/*
-* These are the big endian, atomic definitions.
-*/
-#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p)
-#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p)
-#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p)
-#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
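
Taken together, the bitops.h change drops the separate _le/_be entry points for set/clear/change and routes both endiannesses through single native-endian implementations, with ATOMIC_BITOP choosing an inline C helper when the bit number is a compile-time constant and the out-of-line assembly otherwise. A minimal standalone sketch of that selection logic, with hypothetical ____atomic_set_bit/_set_bit stand-ins for the kernel's real helpers:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's inline, IRQ-masking C path. */
static void ____atomic_set_bit(int nr, volatile unsigned long *p)
{
	p[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
	puts("constant nr: inline C path");
}

/* Hypothetical stand-in for the kernel's out-of-line assembly path. */
static void _set_bit(int nr, volatile unsigned long *p)
{
	p[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
	puts("variable nr: out-of-line assembly path");
}

/* Mirrors the new ATOMIC_BITOP from the diff above. */
#define ATOMIC_BITOP(name,nr,p) \
	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr, p))
#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p)

int main(void)
{
	volatile unsigned long word = 0;
	int nr = 5;

	set_bit(3, &word);	/* 3 is a constant: inline path */
	set_bit(nr, &word);	/* nr is variable: assembly path */
	printf("word = %#lx\n", (unsigned long)word);
	return 0;
}
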
3 changes: 2 additions & 1 deletion arch/arm/include/asm/cacheflush.h
@@ -187,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
* Optimized __flush_icache_all for the common cases. Note that UP ARMv7
* will fall through to use __flush_icache_all_generic.
*/
-#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
+#if (defined(CONFIG_CPU_V7) && \
+(defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
2 changes: 1 addition & 1 deletion arch/arm/include/asm/glue-cache.h
@@ -109,7 +109,7 @@
# define MULTI_CACHE 1
#endif

-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
2 changes: 1 addition & 1 deletion arch/arm/include/asm/glue-proc.h
@@ -230,7 +230,7 @@
# endif
#endif

-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
53 changes: 41 additions & 12 deletions arch/arm/include/asm/spinlock.h
@@ -5,17 +5,52 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif

+/*
+* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
+* extensions, so when running on UP, we have to patch these instructions away.
+*/
+#define ALT_SMP(smp, up) \
+"9998: " smp "\n" \
+" .pushsection \".alt.smp.init\", \"a\"\n" \
+" .long 9998b\n" \
+" " up "\n" \
+" .popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV ALT_SMP("sev.w", "nop.w")
+/*
+* For Thumb-2, special care is needed to ensure that the conditional WFE
+* instruction really does assemble to exactly 4 bytes (as required by
+* the SMP_ON_UP fixup code). By itself "wfene" might cause the
+* assembler to insert an extra (16-bit) IT instruction, depending on the
+* presence or absence of neighbouring conditional instructions.
+*
+* To avoid this unpredictability, an appropriate IT is inserted explicitly:
+* the assembler won't change IT instructions which are explicitly present
+* in the input.
+*/
+#define WFE(cond) ALT_SMP( \
+"it " cond "\n\t" \
+"wfe" cond ".n", \
+\
+"nop.w" \
+)
+#else
+#define SEV ALT_SMP("sev", "nop")
+#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#endif

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
"dsb\n"
-"sev"
+SEV
);
-#elif defined(CONFIG_CPU_32v6K)
+#else
__asm__ __volatile__ (
"mcr p15, 0, %0, c7, c10, 4\n"
-"sev"
+SEV
: : "r" (0)
);
#endif
@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-" wfene\n"
-#endif
+WFE("ne")
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-" wfene\n"
-#endif
+WFE("ne")
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-" wfemi\n"
-#endif
+WFE("mi")
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
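
For readers who haven't met the SMP_ON_UP machinery these macros feed: each ALT_SMP(smp, up) site emits the SMP instruction into the kernel text and records that site's address, together with the UP replacement, in the .alt.smp.init section; if the kernel finds itself booting on a uniprocessor, an early fixup pass walks the table and rewrites every recorded site. A runnable simulation of the table-driven idea in plain C (patching data words rather than live instructions; the encodings in the comments are believed correct for ARM, but all names here are invented):

#include <stdint.h>
#include <stdio.h>

/* One fixup record: where to patch, and what to write there on UP. */
struct alt_entry {
	uint32_t *site;     /* address recorded by ".long 9998b" */
	uint32_t  up_insn;  /* the UP replacement, e.g. a nop encoding */
};

/* Simulated text: stand-ins for patched instruction words. */
static uint32_t insn_sev = 0xe320f004; /* believed ARM "sev" encoding */
static uint32_t insn_wfe = 0xe320f002; /* believed ARM "wfe" encoding */

/* Simulated .alt.smp.init section. */
static struct alt_entry alt_table[] = {
	{ &insn_sev, 0xe320f000 /* "nop" hint */ },
	{ &insn_wfe, 0xe320f000 /* "nop" hint */ },
};

/* What the kernel's fixup loop does when it finds itself on UP. */
static void fixup_smp_on_up(void)
{
	for (size_t i = 0; i < sizeof(alt_table) / sizeof(alt_table[0]); i++)
		*alt_table[i].site = alt_table[i].up_insn;
	/* The real code would also flush the I-cache over the patched range. */
}

int main(void)
{
	fixup_smp_on_up();
	printf("sev site now %#x, wfe site now %#x\n",
	       (unsigned)insn_sev, (unsigned)insn_wfe);
	return 0;
}
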
17 changes: 9 additions & 8 deletions arch/arm/include/asm/system.h
@@ -347,6 +347,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
@@ -365,7 +366,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg.h>
#endif

-#else /* __LINUX_ARM_ARCH__ >= 6 */
+#else /* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long oldval, res;

switch (size) {
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
: "memory", "cc");
} while (res);
break;
-#endif /* CONFIG_CPU_32v6K */
+#endif
case 4:
do {
asm volatile("@ __cmpxchg4\n"
@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long ret;

switch (size) {
-#ifndef CONFIG_CPU_32v6K
+#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size);
break;
-#endif /* !CONFIG_CPU_32v6K */
+#endif
default:
ret = __cmpxchg(ptr, old, new, size);
}
@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
(unsigned long)(n), \
sizeof(*(ptr))))

-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */

/*
* Note : ARMv7-M (currently unsupported by Linux) does not support
@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
(unsigned long long)(o), \
(unsigned long long)(n)))

-#else /* !CONFIG_CPU_32v6K */
+#else /* min ARCH = ARMv6 */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

-#endif /* CONFIG_CPU_32v6K */
+#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

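
The rewritten guards encode an ISA fact worth spelling out: plain ARMv6 (CPU_V6) only has word-sized ldrex/strex, while ARMv6K and ARMv7 add the byte, halfword, and doubleword exclusives (ldrexb, ldrexh, ldrexd and their strex counterparts). That is why the 1- and 2-byte cmpxchg cases and cmpxchg64 are compiled in only when CPU_V6 is excluded. A sketch of the word-sized case on its own, modeled on the __cmpxchg4 fragment above (ARM-only inline asm; build with something like arm-linux-gnueabi-gcc -march=armv6; a sketch, not the kernel's exact implementation):

/* Word-sized compare-and-swap with ldrex/strex, as on ARMv6 and later. */
static inline unsigned long cmpxchg32(volatile unsigned long *ptr,
				      unsigned long old, unsigned long new)
{
	unsigned long oldval, res;

	do {
		asm volatile(
		"	ldrex	%1, [%2]\n"	/* load-exclusive current value */
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"	/* matches expected 'old'? */
		"	strexeq	%0, %4, [%2]\n"	/* if so, try to store 'new' */
		: "=&r" (res), "=&r" (oldval)
		: "r" (ptr), "Ir" (old), "r" (new)
		: "memory", "cc");
	} while (res);			/* retry if the exclusive store failed */

	return oldval;
}
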
(diff truncated; the remaining changed files of the 28 are not shown)
