[ARM] 3152/1: make various assembly local labels actually local (the rest)

Patch from Nicolas Pitre

For assembly labels to actually be local they must start with ".L" and
not only ".", otherwise they still remain visible in the final link,
clutter kallsyms needlessly, and can make for unclear symbolic
backtraces. This patch simply inserts an "L" where appropriate. The
code itself is unchanged.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Nicolas Pitre authored and Russell King committed Nov 11, 2005
1 parent a9c4814 commit 8adbb37
Showing 4 changed files with 61 additions and 59 deletions.
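As a minimal sketch of the behavior the patch relies on (not part of the commit; demo.S, delay_loop, .loop, and .Lret are hypothetical names), GNU as for ELF targets treats labels starting with ".L" as assembler-local and leaves them out of the object's symbol table, whereas labels starting with a bare "." are ordinary symbols that survive into the final link:

    @ demo.S -- assemble and inspect with: as -o demo.o demo.S && nm demo.o
            .text
            .global delay_loop
    delay_loop:
    .loop:                          @ bare ".": kept in the symbol table, so it
            subs    r0, r0, #1      @ would clutter kallsyms once linked in
            bne     .loop
    .Lret:                          @ ".L": discarded by the assembler, invisible
            mov     pc, lr          @ to nm, the linker, and kallsyms

nm here would list delay_loop and .loop but not .Lret; and since only the label spellings change, the assembled instructions are identical, which is why the code itself is unchanged.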
28 changes: 14 additions & 14 deletions arch/arm/lib/csumpartial.S
@@ -26,16 +26,16 @@ td1 .req r4 @ save before use
td2 .req r5 @ save before use
td3 .req lr

-.zero: mov r0, sum
+.Lzero: mov r0, sum
add sp, sp, #4
ldr pc, [sp], #4

/*
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
-.less8: teq len, #0 @ check for zero count
-beq .zero
+.Lless8: teq len, #0 @ check for zero count
+beq .Lzero

/* we must have at least one byte. */
tst buf, #1 @ odd address?
@@ -44,12 +44,12 @@ td3 .req lr
subne len, len, #1
adcnes sum, sum, td0, put_byte_1

-.less4: tst len, #6
-beq .less8_byte
+.Lless4: tst len, #6
+beq .Lless8_byte

/* we are now half-word aligned */

-.less8_wordlp:
+.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
ldrh td0, [buf], #2
sub len, len, #2
@@ -65,19 +65,19 @@ td3 .req lr
#endif
adcs sum, sum, td0
tst len, #6
-bne .less8_wordlp
+bne .Lless8_wordlp

-.less8_byte: tst len, #1 @ odd number of bytes
+.Lless8_byte: tst len, #1 @ odd number of bytes
ldrneb td0, [buf], #1 @ include last byte
adcnes sum, sum, td0, put_byte_0 @ update checksum

-.done: adc r0, sum, #0 @ collect up the last carry
+.Ldone: adc r0, sum, #0 @ collect up the last carry
ldr td0, [sp], #4
tst td0, #1 @ check buffer alignment
movne r0, r0, ror #8 @ rotate checksum by 8 bits
ldr pc, [sp], #4 @ return

-.not_aligned: tst buf, #1 @ odd address
+.Lnot_aligned: tst buf, #1 @ odd address
ldrneb td0, [buf], #1 @ make even
subne len, len, #1
adcnes sum, sum, td0, put_byte_1 @ update checksum
@@ -102,14 +102,14 @@ td3 .req lr
ENTRY(csum_partial)
stmfd sp!, {buf, lr}
cmp len, #8 @ Ensure that we have at least
-blo .less8 @ 8 bytes to copy.
+blo .Lless8 @ 8 bytes to copy.

tst buf, #1
movne sum, sum, ror #8

adds sum, sum, #0 @ C = 0
tst buf, #3 @ Test destination alignment
-blne .not_aligned @ aligh destination, return here
+blne .Lnot_aligned @ align destination, return here

1: bics ip, len, #31
beq 3f
@@ -131,11 +131,11 @@ ENTRY(csum_partial)
ldmfd sp!, {r4 - r5}

3: tst len, #0x1c @ should not change C
-beq .less4
+beq .Lless4

4: ldr td0, [buf], #4
sub len, len, #4
adcs sum, sum, td0
tst len, #0x1c
bne 4b
-b .less4
+b .Lless4
70 changes: 36 additions & 34 deletions arch/arm/lib/csumpartialcopygeneric.S
@@ -22,7 +22,7 @@ dst .req r1
len .req r2
sum .req r3

-.zero: mov r0, sum
+.Lzero: mov r0, sum
load_regs ea

/*
@@ -31,8 +31,9 @@ sum .req r3
* the length. Note that the source pointer hasn't been
* aligned yet.
*/
-.dst_unaligned: tst dst, #1
-beq .dst_16bit
+.Ldst_unaligned:
+tst dst, #1
+beq .Ldst_16bit

load1b ip
sub len, len, #1
@@ -41,7 +42,7 @@ sum .req r3
tst dst, #2
moveq pc, lr @ dst is now 32bit aligned

-.dst_16bit: load2b r8, ip
+.Ldst_16bit: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
@@ -53,48 +54,49 @@ sum .req r3
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
-.less8: teq len, #0 @ check for zero count
-beq .zero
+.Lless8: teq len, #0 @ check for zero count
+beq .Lzero

/* we must have at least one byte. */
tst dst, #1 @ dst 16-bit aligned
-beq .less8_aligned
+beq .Lless8_aligned

/* Align dst */
load1b ip
sub len, len, #1
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst len, #6
-beq .less8_byteonly
+beq .Lless8_byteonly

1: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
-.less8_aligned: tst len, #6
+.Lless8_aligned:
+tst len, #6
bne 1b
-.less8_byteonly:
+.Lless8_byteonly:
tst len, #1
-beq .done
+beq .Ldone
load1b r8
adcs sum, sum, r8, put_byte_0 @ update checksum
strb r8, [dst], #1
-b .done
+b .Ldone

FN_ENTRY
mov ip, sp
save_regs
sub fp, ip, #4

cmp len, #8 @ Ensure that we have at least
-blo .less8 @ 8 bytes to copy.
+blo .Lless8 @ 8 bytes to copy.

adds sum, sum, #0 @ C = 0
tst dst, #3 @ Test destination alignment
-blne .dst_unaligned @ align destination, return here
+blne .Ldst_unaligned @ align destination, return here

/*
* Ok, the dst pointer is now 32bit aligned, and we know
@@ -103,7 +105,7 @@ FN_ENTRY
*/

tst src, #3 @ Test source alignment
-bne .src_not_aligned
+bne .Lsrc_not_aligned

/* Routine for src & dst aligned */

@@ -136,17 +138,17 @@ FN_ENTRY
adcs sum, sum, r4

4: ands len, len, #3
-beq .done
+beq .Ldone
load1l r4
tst len, #2
mov r5, r4, get_byte_0
-beq .exit
+beq .Lexit
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
-.exit: tst len, #1
+.Lexit: tst len, #1
strneb r5, [dst], #1
andne r5, r5, #255
adcnes sum, sum, r5, put_byte_0
Expand All @@ -157,20 +159,20 @@ FN_ENTRY
* the inefficient byte manipulations in the
* architecture independent code.
*/
-.done: adc r0, sum, #0
+.Ldone: adc r0, sum, #0
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
load_regs ea

-.src_not_aligned:
+.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
and ip, src, #3
bic src, src, #3
load1l r5
cmp ip, #2
-beq .src2_aligned
-bhi .src3_aligned
+beq .Lsrc2_aligned
+bhi .Lsrc3_aligned
mov r4, r5, pull #8 @ C = 0
bics ip, len, #15
beq 2f
@@ -211,18 +213,18 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #8
4: ands len, len, #3
-beq .done
+beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
-beq .exit
+beq .Lexit
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
-b .exit
+b .Lexit

-.src2_aligned: mov r4, r5, pull #16
+.Lsrc2_aligned: mov r4, r5, pull #16
adds sum, sum, #0
bics ip, len, #15
beq 2f
@@ -263,20 +265,20 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #16
4: ands len, len, #3
-beq .done
+beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
-beq .exit
+beq .Lexit
adcs sum, sum, r4
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
tst len, #1
-beq .done
+beq .Ldone
load1b r5
-b .exit
+b .Lexit

-.src3_aligned: mov r4, r5, pull #24
+.Lsrc3_aligned: mov r4, r5, pull #24
adds sum, sum, #0
bics ip, len, #15
beq 2f
@@ -317,15 +319,15 @@ FN_ENTRY
adcs sum, sum, r4
mov r4, r5, pull #24
4: ands len, len, #3
-beq .done
+beq .Ldone
mov r5, r4, get_byte_0
tst len, #2
-beq .exit
+beq .Lexit
strb r5, [dst], #1
adcs sum, sum, r4
load1l r4
mov r5, r4, get_byte_0
strb r5, [dst], #1
adcs sum, sum, r4, push #24
mov r5, r4, get_byte_1
-b .exit
+b .Lexit
4 changes: 2 additions & 2 deletions arch/arm/lib/delay.S
@@ -11,7 +11,7 @@
#include <asm/assembler.h>
.text

-LC0: .word loops_per_jiffy
+.LC0: .word loops_per_jiffy

/*
* 0 <= r0 <= 2000
@@ -21,7 +21,7 @@ ENTRY(__udelay)
orr r2, r2, #0x00db
mul r0, r2, r0
ENTRY(__const_udelay) @ 0 <= r0 <= 0x01ffffff
-ldr r2, LC0
+ldr r2, .LC0
ldr r2, [r2] @ max = 0x0fffffff
mov r0, r0, lsr #11 @ max = 0x00003fff
mov r2, r2, lsr #11 @ max = 0x0003ffff