riscv: Use SYM_*() assembly macros instead of deprecated ones
The ENTRY()/END()/WEAK() macros are deprecated and we should use the
new SYM_*() macros [1] for better annotation of symbols. Replace the
deprecated macros with the new ones and fix incorrect uses of
END()/ENDPROC() so that symbols are described correctly.

[1] https://docs.kernel.org/core-api/asm-annotations.html
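
For reference, a minimal sketch of the conversion pattern applied
throughout (the symbols "my_func" and "my_weak_alias" are hypothetical;
see [1] for the full macro semantics):

    /* Deprecated style */
    ENTRY(my_func)
    	ret
    ENDPROC(my_func)

    /* New style: SYM_FUNC_* annotates C-callable functions,
     * SYM_CODE_* annotates code with a non-standard calling
     * convention, and a weak symbol becomes an alias via
     * SYM_FUNC_ALIAS_WEAK() rather than a WEAK() label placed
     * inside the function body.
     */
    SYM_FUNC_START(my_func)
    	ret
    SYM_FUNC_END(my_func)
    SYM_FUNC_ALIAS_WEAK(my_weak_alias, my_func)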

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20231024132655.730417-3-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Clément Léger authored and Palmer Dabbelt committed Nov 6, 2023
1 parent b18f729 commit 76329c6
Showing 17 changed files with 60 additions and 74 deletions.
8 changes: 4 additions & 4 deletions arch/riscv/kernel/copy-unaligned.S
@@ -9,7 +9,7 @@
/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
/* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
andi a4, a2, ~((8*SZREG)-1)
beqz a4, 2f
add a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)

2:
ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)

/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using only byte accesses. */
/* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
andi a4, a2, ~(8-1)
beqz a4, 2f
add a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)

2:
ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)
8 changes: 4 additions & 4 deletions arch/riscv/kernel/fpu.S
@@ -19,7 +19,7 @@
#include <asm/csr.h>
#include <asm/asm-offsets.h>

-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
sw t0, TASK_THREAD_FCSR_F0(a0)
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)

-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -103,7 +103,7 @@ ENTRY(__fstate_restore)
fscsr t0
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)

#define get_f32(which) fmv.x.s a0, which; j 2f
#define put_f32(which) fmv.s.x which, a1; j 2f
12 changes: 6 additions & 6 deletions arch/riscv/kernel/head.S
@@ -19,7 +19,7 @@
#include "efi-header.S"

__HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
/*
* Image header expected by Linux boot-loaders. The image header data
* structure is described in asm/image.h.
@@ -187,9 +187,9 @@ secondary_start_sbi:
wfi
j .Lsecondary_park

-END(_start)
+SYM_CODE_END(_start)

-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
/* Mask all interrupts */
csrw CSR_IE, zero
csrw CSR_IP, zero
@@ -348,10 +348,10 @@ ENTRY(_start_kernel)
tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

-END(_start_kernel)
+SYM_CODE_END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
li sp, 0
li gp, 0
li tp, 0
@@ -449,5 +449,5 @@ ENTRY(reset_regs)
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
12 changes: 6 additions & 6 deletions arch/riscv/kernel/hibernate-asm.S
@@ -21,7 +21,7 @@
*
* Always returns 0
*/
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
/* switch to hibernated image's page table. */
csrw CSR_SATP, s0
sfence.vma
@@ -34,31 +34,31 @@ ENTRY(__hibernate_cpu_resume)
mv a0, zero

ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)

/*
* Prepare to restore the image.
* a0: satp of saved page tables.
* a1: satp of temporary page tables.
* a2: cpu_resume.
*/
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
mv s0, a0
mv s1, a1
mv s2, a2
REG_L s4, restore_pblist
REG_L a1, relocated_restore_code

jr a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)

/*
* The below code will be executed from a 'safe' page.
* It first switches to the temporary page table, then starts to copy the pages
* back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
* to restore the CPU context.
*/
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
/* switch to temp page table. */
csrw satp, s1
sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
bnez s4, .Lcopy

jr s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)
20 changes: 8 additions & 12 deletions arch/riscv/kernel/mcount-dyn.S
@@ -82,7 +82,7 @@
.endm
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
SAVE_ABI

addi a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
mv a1, ra
mv a3, sp

-ftrace_call:
-	.global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_call:
-	.global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
call ftrace_stub
#endif
RESTORE_ABI
jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
SAVE_ALL

addi a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
mv a1, ra
mv a3, sp

-ftrace_regs_call:
-	.global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
call ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_regs_call:
-	.global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
call ftrace_stub
#endif

RESTORE_ALL
jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
8 changes: 4 additions & 4 deletions arch/riscv/kernel/mcount.S
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
ret
SYM_FUNC_END(ftrace_stub_graph)

-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
/*
* On implementing the frame point test, the ideal way is to compare the
* s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,11 +76,11 @@ ENTRY(return_to_handler)
mv a2, a0
RESTORE_RET_ABI_STATE
jalr a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
#endif

#ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
@@ -126,6 +126,6 @@ ENTRY(MCOUNT_NAME)
jalr t5
RESTORE_ABI_STATE
ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
#endif
EXPORT_SYMBOL(MCOUNT_NAME)
4 changes: 2 additions & 2 deletions arch/riscv/kernel/probes/rethook_trampoline.S
@@ -75,7 +75,7 @@
REG_L x31, PT_T6(sp)
.endm

-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
addi sp, sp, -(PT_SIZE_ON_STACK)
save_all_base_regs

@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
addi sp, sp, PT_SIZE_ON_STACK

ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
4 changes: 2 additions & 2 deletions arch/riscv/kernel/suspend_entry.S
@@ -16,7 +16,7 @@
.altmacro
.option norelax

-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
/* Save registers (except A0 and T0-T6) */
REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,7 +57,7 @@ ENTRY(__cpu_suspend_enter)

/* Return to C code */
ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)

SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
4 changes: 2 additions & 2 deletions arch/riscv/kernel/vdso/flush_icache.S
@@ -8,7 +8,7 @@

.text
/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
.cfi_startproc
#ifdef CONFIG_SMP
li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
#endif
ret
.cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)
4 changes: 2 additions & 2 deletions arch/riscv/kernel/vdso/getcpu.S
@@ -8,11 +8,11 @@

.text
/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
.cfi_startproc
/* For now, just do the syscall. */
li a7, __NR_getcpu
ecall
ret
.cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)
4 changes: 2 additions & 2 deletions arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -7,10 +7,10 @@
#include <asm/unistd.h>

.text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
.cfi_startproc
.cfi_signal_frame
li a7, __NR_rt_sigreturn
ecall
.cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)
4 changes: 2 additions & 2 deletions arch/riscv/kernel/vdso/sys_hwprobe.S
@@ -5,11 +5,11 @@
#include <asm/unistd.h>

.text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
.cfi_startproc
li a7, __NR_riscv_hwprobe
ecall
ret

.cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)
6 changes: 3 additions & 3 deletions arch/riscv/lib/memcpy.S
@@ -7,8 +7,7 @@
#include <asm/asm.h>

/* void *memcpy(void *, const void *, size_t) */
-ENTRY(__memcpy)
-WEAK(memcpy)
+SYM_FUNC_START(__memcpy)
move t6, a0 /* Preserve return value */

/* Defer to byte-oriented copy for small sizes */
@@ -105,6 +104,7 @@ WEAK(memcpy)
bltu a1, a3, 5b
6:
ret
-END(__memcpy)
+SYM_FUNC_END(__memcpy)
+SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)
3 changes: 1 addition & 2 deletions arch/riscv/lib/memmove.S
@@ -7,7 +7,6 @@
#include <asm/asm.h>

SYM_FUNC_START(__memmove)
-SYM_FUNC_START_WEAK(memmove)
/*
* Returns
* a0 - dest
@@ -312,7 +311,7 @@ SYM_FUNC_START_WEAK(memmove)
.Lreturn_from_memmove:
ret

-SYM_FUNC_END(memmove)
+SYM_FUNC_END(__memmove)
SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
SYM_FUNC_ALIAS(__pi_memmove, __memmove)
SYM_FUNC_ALIAS(__pi___memmove, __memmove)
6 changes: 3 additions & 3 deletions arch/riscv/lib/memset.S
@@ -8,8 +8,7 @@
#include <asm/asm.h>

/* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
+SYM_FUNC_START(__memset)
move t0, a0 /* Preserve return value */

/* Defer to byte-oriented fill for small sizes */
@@ -110,4 +109,5 @@ WEAK(memset)
bltu t0, a3, 5b
6:
ret
-END(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_ALIAS_WEAK(memset, __memset)