Skip to content

Commit

Permalink
riscv: use ".L" local labels in assembly when applicable
Browse files Browse the repository at this point in the history
For the sake of coherency, use local labels in assembly when
applicable. This also avoids kprobes being confused when applying a
kprobe, since the size of a function is computed by checking where the
next visible symbol is located. This might end up computing some
function sizes to be way shorter than expected and thus failing to apply
kprobes to the specified offset.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20231024132655.730417-2-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
  • Loading branch information
Clément Léger authored and Palmer Dabbelt committed Nov 6, 2023
1 parent 57a4542 commit b18f729
Show file tree
Hide file tree
Showing 4 changed files with 44 additions and 44 deletions.
6 changes: 3 additions & 3 deletions arch/riscv/kernel/entry.S
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,9 @@ SYM_CODE_START(handle_exception)
* register will contain 0, and we should continue on the current TP.
*/
csrrw tp, CSR_SCRATCH, tp
bnez tp, _save_context
bnez tp, .Lsave_context

_restore_kernel_tpsp:
.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)

Expand All @@ -40,7 +40,7 @@ _restore_kernel_tpsp:
REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
.Lsave_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
Expand Down
18 changes: 9 additions & 9 deletions arch/riscv/kernel/head.S
Original file line number Diff line number Diff line change
Expand Up @@ -164,12 +164,12 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
#endif
call setup_trap_vector
call .Lsetup_trap_vector
tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
.Lsetup_trap_vector:
/* Set trap vector to exception handler */
la a0, handle_exception
csrw CSR_TVEC, a0
Expand Down Expand Up @@ -206,15 +206,15 @@ ENTRY(_start_kernel)
* not implement PMPs, so we set up a quick trap handler to just skip
* touching the PMPs on any trap.
*/
la a0, pmp_done
la a0, .Lpmp_done
csrw CSR_TVEC, a0

li a0, -1
csrw CSR_PMPADDR0, a0
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
.align 2
pmp_done:
.Lpmp_done:

/*
* The hartid in a0 is expected later on, and we have no firmware
Expand Down Expand Up @@ -275,12 +275,12 @@ pmp_done:
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
ble a4, a3, clear_bss_done
clear_bss:
ble a4, a3, .Lclear_bss_done
.Lclear_bss:
REG_S zero, (a3)
add a3, a3, RISCV_SZPTR
blt a3, a4, clear_bss
clear_bss_done:
blt a3, a4, .Lclear_bss
.Lclear_bss_done:
#endif
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
Expand All @@ -305,7 +305,7 @@ clear_bss_done:
call relocate_enable_mmu
#endif /* CONFIG_MMU */

call setup_trap_vector
call .Lsetup_trap_vector
/* Restore C environment */
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
Expand Down
10 changes: 5 additions & 5 deletions arch/riscv/kernel/mcount.S
Original file line number Diff line number Diff line change
Expand Up @@ -85,24 +85,24 @@ ENTRY(MCOUNT_NAME)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
REG_L t1, 0(t0)
bne t1, t4, do_ftrace_graph_caller
bne t1, t4, .Ldo_ftrace_graph_caller

la t3, ftrace_graph_entry
REG_L t2, 0(t3)
la t6, ftrace_graph_entry_stub
bne t2, t6, do_ftrace_graph_caller
bne t2, t6, .Ldo_ftrace_graph_caller
#endif
la t3, ftrace_trace_function
REG_L t5, 0(t3)
bne t5, t4, do_trace
bne t5, t4, .Ldo_trace
ret

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* A pseudo representation for the function graph tracer:
* prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
*/
do_ftrace_graph_caller:
.Ldo_ftrace_graph_caller:
addi a0, s0, -SZREG
mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
Expand All @@ -118,7 +118,7 @@ do_ftrace_graph_caller:
* A pseudo representation for the function tracer:
* (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
*/
do_trace:
.Ldo_trace:
REG_L a1, -SZREG(s0)
mv a0, ra

Expand Down
54 changes: 27 additions & 27 deletions arch/riscv/lib/memmove.S
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove)
*/

/* Return if nothing to do */
beq a0, a1, return_from_memmove
beqz a2, return_from_memmove
beq a0, a1, .Lreturn_from_memmove
beqz a2, .Lreturn_from_memmove

/*
* Register Uses
Expand Down Expand Up @@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove)
* small enough not to bother.
*/
andi t0, a2, -(2 * SZREG)
beqz t0, byte_copy
beqz t0, .Lbyte_copy

/*
* Now solve for t5 and t6.
Expand All @@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove)
*/
xor t0, a0, a1
andi t1, t0, (SZREG - 1)
beqz t1, coaligned_copy
beqz t1, .Lcoaligned_copy
/* Fall through to misaligned fixup copy */

misaligned_fixup_copy:
bltu a1, a0, misaligned_fixup_copy_reverse
.Lmisaligned_fixup_copy:
bltu a1, a0, .Lmisaligned_fixup_copy_reverse

misaligned_fixup_copy_forward:
jal t0, byte_copy_until_aligned_forward
.Lmisaligned_fixup_copy_forward:
jal t0, .Lbyte_copy_until_aligned_forward

andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
Expand Down Expand Up @@ -153,10 +153,10 @@ misaligned_fixup_copy_forward:
mv t3, t6 /* Fix the dest pointer in case the loop was broken */

add a1, t3, a5 /* Restore the src pointer */
j byte_copy_forward /* Copy any remaining bytes */
j .Lbyte_copy_forward /* Copy any remaining bytes */

misaligned_fixup_copy_reverse:
jal t0, byte_copy_until_aligned_reverse
.Lmisaligned_fixup_copy_reverse:
jal t0, .Lbyte_copy_until_aligned_reverse

andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
Expand Down Expand Up @@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse:
mv t4, t5 /* Fix the dest pointer in case the loop was broken */

add a4, t4, a5 /* Restore the src pointer */
j byte_copy_reverse /* Copy any remaining bytes */
j .Lbyte_copy_reverse /* Copy any remaining bytes */

/*
* Simple copy loops for SZREG co-aligned memory locations.
* These also make calls to do byte copies for any unaligned
* data at their terminations.
*/
coaligned_copy:
bltu a1, a0, coaligned_copy_reverse
.Lcoaligned_copy:
bltu a1, a0, .Lcoaligned_copy_reverse

coaligned_copy_forward:
jal t0, byte_copy_until_aligned_forward
.Lcoaligned_copy_forward:
jal t0, .Lbyte_copy_until_aligned_forward

1:
REG_L t1, ( 0 * SZREG)(a1)
Expand All @@ -235,10 +235,10 @@ coaligned_copy_forward:
REG_S t1, (-1 * SZREG)(t3)
bne t3, t6, 1b

j byte_copy_forward /* Copy any remaining bytes */
j .Lbyte_copy_forward /* Copy any remaining bytes */

coaligned_copy_reverse:
jal t0, byte_copy_until_aligned_reverse
.Lcoaligned_copy_reverse:
jal t0, .Lbyte_copy_until_aligned_reverse

1:
REG_L t1, (-1 * SZREG)(a4)
Expand All @@ -247,7 +247,7 @@ coaligned_copy_reverse:
REG_S t1, ( 0 * SZREG)(t4)
bne t4, t5, 1b

j byte_copy_reverse /* Copy any remaining bytes */
j .Lbyte_copy_reverse /* Copy any remaining bytes */

/*
* These are basically sub-functions within the function. They
Expand All @@ -258,7 +258,7 @@ coaligned_copy_reverse:
* up from where they were left and we avoid code duplication
* without any overhead except the call in and return jumps.
*/
byte_copy_until_aligned_forward:
.Lbyte_copy_until_aligned_forward:
beq t3, t5, 2f
1:
lb t1, 0(a1)
Expand All @@ -269,7 +269,7 @@ byte_copy_until_aligned_forward:
2:
jalr zero, 0x0(t0) /* Return to multibyte copy loop */

byte_copy_until_aligned_reverse:
.Lbyte_copy_until_aligned_reverse:
beq t4, t6, 2f
1:
lb t1, -1(a4)
Expand All @@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse:
* These will byte copy until they reach the end of data to copy.
* At that point, they will call to return from memmove.
*/
byte_copy:
bltu a1, a0, byte_copy_reverse
.Lbyte_copy:
bltu a1, a0, .Lbyte_copy_reverse

byte_copy_forward:
.Lbyte_copy_forward:
beq t3, t4, 2f
1:
lb t1, 0(a1)
Expand All @@ -299,7 +299,7 @@ byte_copy_forward:
2:
ret

byte_copy_reverse:
.Lbyte_copy_reverse:
beq t4, t3, 2f
1:
lb t1, -1(a4)
Expand All @@ -309,7 +309,7 @@ byte_copy_reverse:
bne t4, t3, 1b
2:

return_from_memmove:
.Lreturn_from_memmove:
ret

SYM_FUNC_END(memmove)
Expand Down

0 comments on commit b18f729

Please sign in to comment.