Skip to content

Commit

Permalink
[ARM] 3256/1: Make the function-returning ldm's use sp as the base register
Browse files Browse the repository at this point in the history

Patch from Catalin Marinas

If the low interrupt latency mode is enabled for the CPU (from ARMv6
onwards), the ldm/stm instructions are no longer atomic. An ldm instruction
restoring the sp and pc registers can be interrupted immediately after sp
was updated but before the pc. If this happens, the CPU restores the base
register to the value before the ldm instruction but if the base register
is not sp, the interrupt routine will corrupt the stack and the restarted
ldm instruction will load garbage.

Note that future ARM cores might always run in the low interrupt latency
mode.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  • Loading branch information
Catalin Marinas authored and Russell King committed Jan 12, 2006
1 parent ece5f7b commit 90303b1
Show file tree
Hide file tree
Showing 4 changed files with 13 additions and 11 deletions.
4 changes: 2 additions & 2 deletions arch/arm/kernel/fiq.c
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
ldmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
ldmea fp, {fp, sp, pc}"
ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
Expand All @@ -119,7 +119,7 @@ void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
stmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
ldmea fp, {fp, sp, pc}"
ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
Expand Down
6 changes: 4 additions & 2 deletions arch/arm/lib/csumpartialcopy.S
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,13 @@
*/

.macro save_regs
mov ip, sp
stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc}
sub fp, ip, #4
.endm

.macro load_regs,flags
LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc})
.macro load_regs
ldmfd sp, {r1, r4 - r8, fp, sp, pc}
.endm

.macro load1b, reg1
Expand Down
6 changes: 2 additions & 4 deletions arch/arm/lib/csumpartialcopygeneric.S
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ len .req r2
sum .req r3

.Lzero: mov r0, sum
load_regs ea
load_regs

/*
* Align an unaligned destination pointer. We know that
Expand Down Expand Up @@ -87,9 +87,7 @@ sum .req r3
b .Ldone

FN_ENTRY
mov ip, sp
save_regs
sub fp, ip, #4

cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
Expand Down Expand Up @@ -163,7 +161,7 @@ FN_ENTRY
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
load_regs ea
load_regs

.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
Expand Down
8 changes: 5 additions & 3 deletions arch/arm/lib/csumpartialcopyuser.S
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,13 @@
.text

.macro save_regs
mov ip, sp
stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
sub fp, ip, #4
.endm

.macro load_regs,flags
ldm\flags fp, {r1, r2, r4-r8, fp, sp, pc}
.macro load_regs
ldmfd sp, {r1, r2, r4-r8, fp, sp, pc}
.endm

.macro load1b, reg1
Expand Down Expand Up @@ -100,5 +102,5 @@
6002: teq r2, r1
strneb r0, [r1], #1
bne 6002b
load_regs ea
load_regs
.previous

0 comments on commit 90303b1

Please sign in to comment.