ARM: Bring back ARMv3 IO and user access code
This partially reverts 357c9c1
(ARM: Remove support for ARMv3 ARM610 and ARM710 CPUs).

Although we only support StrongARM on the RiscPC, we need to keep the
ARMv3 user access code for this platform because the bus does not
understand half-word load/stores.

Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Russell King committed Aug 13, 2012
1 parent 730a812 commit 080fc66
Showing 4 changed files with 816 additions and 3 deletions.
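
Since the ARMv4 readsw/writesw helpers rely on half-word (ldrh/strh) accesses, the RiscPC bus constraint means every 16-bit port transfer has to be widened to a full 32-bit cycle. As orientation before the diffs, here is a minimal C sketch of the access pattern the restored ARMv3 helpers implement; it is illustrative only (the names are not kernel API) and simply mirrors what the assembly below does: reads keep the low 16 bits of a word load, writes replicate the half-word into both halves of the word.

#include <stdint.h>

/* Illustrative sketch, not the kernel implementation. */
static void readsw_sketch(volatile uint32_t *port, uint16_t *buf, int count)
{
        while (count--)
                *buf++ = (uint16_t)(*port & 0xffff);    /* 32-bit read, data in the low half */
}

static void writesw_sketch(volatile uint32_t *port, const uint16_t *buf, int count)
{
        while (count--) {
                uint32_t v = *buf++;
                *port = v | (v << 16);                  /* same half-word on both halves of the bus */
        }
}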
23 changes: 20 additions & 3 deletions arch/arm/lib/Makefile
@@ -16,13 +16,30 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
    call_with_stack.o
 
 mmu-y := clear_user.o copy_page.o getuser.o putuser.o
-mmu-y += copy_from_user.o copy_to_user.o
+
+# the code in uaccess.S is not preemption safe and
+# probably faster on ARMv3 only
+ifeq ($(CONFIG_PREEMPT),y)
+  mmu-y += copy_from_user.o copy_to_user.o
+else
+ifneq ($(CONFIG_CPU_32v3),y)
+  mmu-y += copy_from_user.o copy_to_user.o
+else
+  mmu-y += uaccess.o
+endif
+endif
 
 # using lib_ here won't override already available weak symbols
 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
 
-lib-$(CONFIG_MMU) += $(mmu-y)
-lib-y += io-readsw-armv4.o io-writesw-armv4.o
+lib-$(CONFIG_MMU) += $(mmu-y)
+
+ifeq ($(CONFIG_CPU_32v3),y)
+  lib-y += io-readsw-armv3.o io-writesw-armv3.o
+else
+  lib-y += io-readsw-armv4.o io-writesw-armv4.o
+endif
+
 lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
 lib-$(CONFIG_ARCH_SHARK) += io-shark.o

106 changes: 106 additions & 0 deletions arch/arm/lib/io-readsw-armv3.S
@@ -0,0 +1,106 @@
/*
* linux/arch/arm/lib/io-readsw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>

.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
mov r2, lr
b panic
.Linsw_bad_align_msg:
.asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align

.Linsw_align: tst r1, #1
bne .Linsw_bad_alignment

ldr r3, [r0]
strb r3, [r1], #1
mov r3, r3, lsr #8
strb r3, [r1], #1

subs r2, r2, #1
moveq pc, lr

ENTRY(__raw_readsw)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
tst r1, #3
bne .Linsw_align

.Linsw_aligned: mov ip, #0xff
orr ip, ip, ip, lsl #8
stmfd sp!, {r4, r5, r6, lr}

subs r2, r2, #8
bmi .Lno_insw_8

.Linsw_8_lp: ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16

ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16

ldr r5, [r0]
and r5, r5, ip
ldr r6, [r0]
orr r5, r5, r6, lsl #16

ldr r6, [r0]
and r6, r6, ip
ldr lr, [r0]
orr r6, r6, lr, lsl #16

stmia r1!, {r3 - r6}

subs r2, r2, #8
bpl .Linsw_8_lp

tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}

.Lno_insw_8: tst r2, #4
beq .Lno_insw_4

ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16

ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16

stmia r1!, {r3, r4}

.Lno_insw_4: tst r2, #2
beq .Lno_insw_2

ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16

str r3, [r1], #4

.Lno_insw_2: tst r2, #1
ldrne r3, [r0]
strneb r3, [r1], #1
movne r3, r3, lsr #8
strneb r3, [r1]

ldmfd sp!, {r4, r5, r6, pc}
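
For readers not fluent in ARM assembly, a loose C rendering of the aligned fast path above (illustrative only, assuming a little-endian buffer): each destination word is assembled from two successive 32-bit port reads, eight half-words per loop pass, and in the assembly the four packed words are then written out with a single stmia; the 4/2/1 tail cases handle whatever remains.

#include <stdint.h>

/* Loose sketch of the .Linsw_8_lp loop, not the kernel code itself. */
static void readsw_fastpath_sketch(volatile uint32_t *port, uint32_t *buf, int hwords)
{
        while (hwords >= 8) {
                for (int i = 0; i < 4; i++) {
                        uint32_t lo = *port & 0xffff;   /* first half-word */
                        uint32_t hi = *port & 0xffff;   /* second half-word */
                        *buf++ = lo | (hi << 16);       /* pack two half-words per word */
                }
                hwords -= 8;
        }
        /* 0..7 remaining half-words: see the 4/2/1 tail cases above */
}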


126 changes: 126 additions & 0 deletions arch/arm/lib/io-writesw-armv3.S
@@ -0,0 +1,126 @@
/*
* linux/arch/arm/lib/io-writesw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>

.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
mov r2, lr
b panic
.Loutsw_bad_align_msg:
.asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align

.Loutsw_align: tst r1, #1
bne .Loutsw_bad_alignment

add r1, r1, #2

ldr r3, [r1, #-4]
mov r3, r3, lsr #16
orr r3, r3, r3, lsl #16
str r3, [r0]
subs r2, r2, #1
moveq pc, lr

ENTRY(__raw_writesw)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
tst r1, #3
bne .Loutsw_align

stmfd sp!, {r4, r5, r6, lr}

subs r2, r2, #8
bmi .Lno_outsw_8

.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6}

mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]

mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]

mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]

mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]

mov ip, r5, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]

mov ip, r5, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]

mov ip, r6, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]

mov ip, r6, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]

subs r2, r2, #8
bpl .Loutsw_8_lp

tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}

.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4

ldmia r1!, {r3, r4}

mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]

mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]

mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]

mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]

.Lno_outsw_4: tst r2, #2
beq .Lno_outsw_2

ldr r3, [r1], #4

mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]

mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]

.Lno_outsw_2: tst r2, #1

ldrne r3, [r1]

movne ip, r3, lsl #16
orrne ip, ip, ip, lsr #16
strne ip, [r0]

ldmfd sp!, {r4, r5, r6, pc}