Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates (part one) from Russell King:

 - MMC patches from Ulf Hansson and Pawel Moll.  These add support for
   DDR mode and the latest variant found on ARM Versatile Express, as
   well as a number of cleanups.

 - A fix to improve the behaviour of ARM's sched_clock()

 - Changes to the ARM ioremap() code.  I'm not convinced by the
   primary arguments for this, but it's been around for a while, and
   people seem happy with it - and the "other" justification for this is
   at

      http://lkml.org/lkml/2012/12/6/184

 - Add SCHED_HRTICK to ARM's Kconfig

 - Making the ARM SHA/AES code Thumb-2 compatible

 - A collection of other small updates.

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (26 commits)
  ARM: add SCHED_HRTICK config option
  ARM: 7650/1: mm: replace direct access to mm->context.id with new macro
  ARM: 7649/1: mm: mm->context.id fix for big-endian
  ARM: 7648/1: pci: Allow passing per-controller private data
  ARM: 7647/1: pci: Keep pci_common_init() around after init
  ARM: fix warnings introduced by previous patch
  ARM: 7646/1: mm: use static_vm for managing static mapped areas
  ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area
  ARM: 7644/1: vmregion: remove vmregion code entirely
  MAINTAINERS: Re-assert MMCI driver maintainer status
  MAINTAINERS: add additional file for MMCI driver
  MAINTAINERS: add maintainer entry for AMBA serial drivers
  ARM: 7637/1: memory: use SZ_ constants for defining the virtual memory layout
  ARM: 7643/1: sched: correct update_sched_clock()
  ARM: 7635/1: versatile: fix the PCI IRQ regression
  ARM: 7639/1: cache-l2x0: add missed dummy outer_resume entry
  ARM: 7630/1: mmc: mmci: Fixup and cleanup code for DMA handling
  ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path
  ARM: 7631/1: mmc: mmci: Add new VE MMCI variant
  ARM: 7623/1: mmc: mmci: Fixup clock gating when freq is 0 for ST-variants
  ...
Linus Torvalds committed Feb 20, 2013
2 parents e177bb5 + 1b1c740 commit 32f9aab
Showing 26 changed files with 401 additions and 540 deletions.
10 changes: 9 additions & 1 deletion MAINTAINERS
@@ -670,8 +670,16 @@ F: drivers/input/serio/ambakmi.*
F: include/linux/amba/kmi.h

ARM PRIMECELL MMCI PL180/1 DRIVER
S: Orphan
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: drivers/mmc/host/mmci.*
F: include/linux/amba/mmci.h

ARM PRIMECELL UART PL010 AND PL011 DRIVERS
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: drivers/tty/serial/amba-pl01*.c
F: include/linux/amba/serial.h

ARM PRIMECELL BUS SUPPORT
M: Russell King <linux@arm.linux.org.uk>
3 changes: 3 additions & 0 deletions arch/arm/Kconfig
@@ -1654,6 +1654,9 @@ config HZ
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
default 100

config SCHED_HRTICK
def_bool HIGH_RES_TIMERS

config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode"
depends on CPU_V7 && !CPU_V6 && !CPU_V6K
64 changes: 20 additions & 44 deletions arch/arm/crypto/aes-armv4.S
@@ -34,8 +34,9 @@
@ A little glue here to select the correct code below for the ARM CPU
@ that is being targetted.

#include <linux/linkage.h>

.text
.code 32

.type AES_Te,%object
.align 5
@@ -145,10 +146,8 @@ AES_Te:

@ void AES_encrypt(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
.global AES_encrypt
.type AES_encrypt,%function
.align 5
AES_encrypt:
ENTRY(AES_encrypt)
sub r3,pc,#8 @ AES_encrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
@@ -239,15 +238,8 @@ AES_encrypt:
strb r6,[r12,#14]
strb r3,[r12,#15]
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size AES_encrypt,.-AES_encrypt
ENDPROC(AES_encrypt)

.type _armv4_AES_encrypt,%function
.align 2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
ldr pc,[sp],#4 @ pop and return
.size _armv4_AES_encrypt,.-_armv4_AES_encrypt

.global private_AES_set_encrypt_key
.type private_AES_set_encrypt_key,%function
.align 5
private_AES_set_encrypt_key:
ENTRY(private_AES_set_encrypt_key)
_armv4_AES_set_encrypt_key:
sub r3,pc,#8 @ AES_set_encrypt_key
teq r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:

.Ldone: mov r0,#0
ldmia sp!,{r4-r12,lr}
.Labrt: tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
.Labrt: mov pc,lr
ENDPROC(private_AES_set_encrypt_key)

.global private_AES_set_decrypt_key
.type private_AES_set_decrypt_key,%function
.align 5
private_AES_set_decrypt_key:
ENTRY(private_AES_set_decrypt_key)
str lr,[sp,#-4]! @ push lr
#if 0
@ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
bne .Lmix

mov r0,#0
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
ENDPROC(private_AES_set_decrypt_key)

.type AES_Td,%object
.align 5
@@ -862,10 +841,8 @@ AES_Td:

@ void AES_decrypt(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
.global AES_decrypt
.type AES_decrypt,%function
.align 5
AES_decrypt:
ENTRY(AES_decrypt)
sub r3,pc,#8 @ AES_decrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
@@ -956,15 +933,8 @@ AES_decrypt:
strb r6,[r12,#14]
strb r3,[r12,#15]
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size AES_decrypt,.-AES_decrypt
ENDPROC(AES_decrypt)

.type _armv4_AES_decrypt,%function
.align 2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
and r9,lr,r1,lsr#8

ldrb r7,[r10,r7] @ Td4[s1>>0]
ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24]
ARM( ldrb r1,[r10,r1,lsr#24] ) @ Td4[s1>>24]
THUMB( add r1,r10,r1,lsr#24 ) @ Td4[s1>>24]
THUMB( ldrb r1,[r1] )
ldrb r8,[r10,r8] @ Td4[s1>>16]
eor r0,r7,r0,lsl#24
ldrb r9,[r10,r9] @ Td4[s1>>8]
@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
ldrb r8,[r10,r8] @ Td4[s2>>0]
and r9,lr,r2,lsr#16

ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24]
ARM( ldrb r2,[r10,r2,lsr#24] ) @ Td4[s2>>24]
THUMB( add r2,r10,r2,lsr#24 ) @ Td4[s2>>24]
THUMB( ldrb r2,[r2] )
eor r0,r0,r7,lsl#8
ldrb r9,[r10,r9] @ Td4[s2>>16]
eor r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
and r9,lr,r3 @ i2

ldrb r9,[r10,r9] @ Td4[s3>>0]
ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24]
ARM( ldrb r3,[r10,r3,lsr#24] ) @ Td4[s3>>24]
THUMB( add r3,r10,r3,lsr#24 ) @ Td4[s3>>24]
THUMB( ldrb r3,[r3] )
eor r0,r0,r7,lsl#16
ldr r7,[r11,#0]
eor r1,r1,r8,lsl#8
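
For context on the conversion above: the open-coded .global/.type/.size directives are replaced by ENTRY()/ENDPROC() from <linux/linkage.h>, which also record the correct symbol type for linking, and the register-shifted byte loads such as ldrb r1,[r10,r1,lsr#24] are split with ARM()/THUMB() pairs because Thumb-2 byte loads only allow a register offset shifted left by at most #3. A simplified sketch of how those selection macros behave is shown below; the in-tree definitions live in arch/arm/include/asm/unified.h and also cover the W() width qualifier.

/*
 * Simplified sketch (C-preprocessor view) of the instruction-set
 * selection used above.  In a Thumb-2 kernel the ARM() lines drop
 * out and the THUMB() lines are assembled; in an ARM kernel it is
 * the other way round.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define ARM(inst...)			/* discarded */
#define THUMB(inst...)	inst
#else
#define ARM(inst...)	inst
#define THUMB(inst...)			/* discarded */
#endif
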
24 changes: 9 additions & 15 deletions arch/arm/crypto/sha1-armv4-large.S
@@ -51,13 +51,12 @@
@ Profiler-assisted and platform-specific optimization resulted in 10%
@ improvement on Cortex A8 core and 12.2 cycles per byte.

.text
#include <linux/linkage.h>

.global sha1_block_data_order
.type sha1_block_data_order,%function
.text

.align 2
sha1_block_data_order:
ENTRY(sha1_block_data_order)
stmdb sp!,{r4-r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
teq r14,sp
cmp r14,sp
bne .L_00_15 @ [((11+4)*5+2)*3]
#if __ARM_ARCH__<7
ldrb r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
teq r14,sp @ preserve carry
ARM( teq r14,sp ) @ preserve carry
THUMB( mov r11,sp )
THUMB( teq r14,r11 ) @ preserve carry
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes

@@ -466,7 +467,7 @@ sha1_block_data_order:
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
teq r14,sp
cmp r14,sp
bne .L_40_59 @ [+((12+5)*5+2)*4]

ldr r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
teq r1,r2
bne .Lloop @ [+18], total 1307

#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
#else
ldmia sp!,{r4-r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.align 2
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
.size sha1_block_data_order,.-sha1_block_data_order
ENDPROC(sha1_block_data_order)
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.align 2
1 change: 1 addition & 0 deletions arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
#endif
struct pci_ops *ops;
int nr_controllers;
void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
void (*preinit)(void);
8 changes: 4 additions & 4 deletions arch/arm/include/asm/memory.h
@@ -36,23 +36,23 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)

/*
* The maximum size of a 26-bit user space task.
*/
#define TASK_SIZE_26 UL(0x04000000)
#define TASK_SIZE_26 (UL(1) << 26)

/*
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbols relocation (2^24)*/
#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024)
#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
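
The substitutions above are purely cosmetic; the symbolic names expand to the values of the literals they replace:

/* Values behind the names used in the hunk above (SZ_* from the kernel's sizes.h): */
/*   SZ_8M       = 0x00800000  =  8 * 1024 * 1024                          */
/*   SZ_16M      = 0x01000000  = 16 * 1024 * 1024  (the old TASK_SIZE gap) */
/*   UL(1) << 26 = 0x04000000  = 64MB              (the old TASK_SIZE_26)  */
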
1 change: 1 addition & 0 deletions arch/arm/include/asm/outercache.h
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
static inline void outer_flush_all(void) { }
static inline void outer_inv_all(void) { }
static inline void outer_disable(void) { }
static inline void outer_resume(void) { }

#endif

16 changes: 1 addition & 15 deletions arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
u32 slock;

smp_mb();

__asm__ __volatile__(
" mov %1, #1\n"
"1: ldrex %0, [%2]\n"
" uadd16 %0, %0, %1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (slock), "=&r" (tmp)
: "r" (&lock->slock)
: "cc");

lock->tickets.owner++;
dsb_sev();
}

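
The simplification above relies on ARM's ticket-lock design: only the CPU that currently holds the lock ever advances tickets.owner, so after the smp_mb() a plain half-word increment releases the lock and no ldrex/strex retry loop is needed; waiters simply spin until owner reaches their ticket. A minimal, illustrative sketch of the layout and release path follows (field names mirror the surrounding code, but this is not the verbatim in-tree definition):

/* Illustrative ticket-lock sketch; conveys the idea, not the exact kernel header. */
typedef struct {
	union {
		unsigned int slock;			/* whole 32-bit lock word */
		struct {
			unsigned short owner;		/* ticket currently being served */
			unsigned short next;		/* next ticket to hand out */
		} tickets;
	};
} sketch_spinlock_t;

static inline void sketch_spin_unlock(sketch_spinlock_t *lock)
{
	smp_mb();			/* order the critical section before the release */
	lock->tickets.owner++;		/* single writer: the lock holder, so no exclusives needed */
	dsb_sev();			/* SEV wakes cores parked in WFE waiting for the lock */
}
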
9 changes: 6 additions & 3 deletions arch/arm/kernel/bios32.c
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}

static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
{
int ret;
struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
return 0;
}

static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
{
struct pci_sys_data *sys = NULL;
int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
sys->map_irq = hw->map_irq;
INIT_LIST_HEAD(&sys->resources);

if (hw->private_data)
sys->private_data = hw->private_data[nr];

ret = hw->setup(nr, sys);

if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
}
}

void __init pci_common_init(struct hw_pci *hw)
void pci_common_init(struct hw_pci *hw)
{
struct pci_sys_data *sys;
LIST_HEAD(head);
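
Taken together with the hw_pci.private_data field added in mach/pci.h above, the copy into pci_sys_data during pcibios_init_hw() lets a host controller driver hand per-controller state to its callbacks, and dropping the __init annotations lets pci_common_init() be called from ordinary probe paths. A hypothetical sketch of how a board file might use this (the names and the my_pcie_port type are invented for illustration):

/* Hypothetical use of the new per-controller private_data hook. */
struct my_pcie_port {
	void __iomem *base;
};

static struct my_pcie_port ports[2];
static void *my_pci_private[2] = { &ports[0], &ports[1] };

static int my_pci_setup(int nr, struct pci_sys_data *sys)
{
	struct my_pcie_port *port = sys->private_data;	/* == my_pci_private[nr] */

	/* ... program 'port', request resources, add them to sys->resources ... */
	return 1;	/* >0: controller present, scan its bus */
}

static struct hw_pci my_pci = {
	.nr_controllers	= 2,
	.private_data	= my_pci_private,
	.setup		= my_pci_setup,
	/* .map_irq, .scan, ... as usual */
};

/* The probe/init code would then call pci_common_init(&my_pci); */
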
4 changes: 2 additions & 2 deletions arch/arm/kernel/sched_clock.c
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
* detectable in cyc_to_fixed_sched_clock().
*/
raw_local_irq_save(flags);
cd.epoch_cyc = cyc;
cd.epoch_cyc_copy = cyc;
smp_wmb();
cd.epoch_ns = ns;
smp_wmb();
cd.epoch_cyc_copy = cyc;
cd.epoch_cyc = cyc;
raw_local_irq_restore(flags);
}

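
The reordering above matters because the reader of this epoch data is lock-free: it loads epoch_cyc, then epoch_ns, and only trusts the pair if epoch_cyc_copy still matches, so the updater must publish epoch_cyc_copy first and epoch_cyc last. The old order could let a reader see a new epoch_cyc together with a stale epoch_ns and still pass the check. A simplified sketch of that reader-side protocol, modelled on the surrounding file rather than copied from it:

/* Simplified reader that the corrected write order above pairs with. */
static unsigned long long sketch_cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	do {
		epoch_cyc = cd.epoch_cyc;		/* written LAST by update_sched_clock() */
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);	/* written FIRST by update_sched_clock() */

	/* Reaching here means (epoch_cyc, epoch_ns) came from one complete update. */
	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}
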
… (diffs for the remaining changed files not shown)
