Merge branches 'for-next/asm' and 'for-next/insn' into for-next/bti
Merge in dependencies for in-kernel Branch Target Identification support.

* for-next/asm:
  arm64: Disable old style assembly annotations
  arm64: kernel: Convert to modern annotations for assembly functions
  arm64: entry: Refactor and modernise annotation for ret_to_user
  x86/asm: Provide a Kconfig symbol for disabling old assembly annotations
  x86/32: Remove CONFIG_DOUBLEFAULT

* for-next/insn:
  arm64: insn: Report PAC and BTI instructions as skippable
  arm64: insn: Don't assume unrecognized HINTs are skippable
  arm64: insn: Provide a better name for aarch64_insn_is_nop()
  arm64: insn: Add constants for new HINT instruction decode
Will Deacon committed May 5, 2020
3 parents 80e4e56 + 50479d5 + 47d67e4 commit e515982
Showing 27 changed files with 144 additions and 126 deletions.
1 change: 1 addition & 0 deletions arch/arm64/Kconfig
@@ -66,6 +66,7 @@ config ARM64
select ARCH_USE_GNU_PROPERTY
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+select ARCH_USE_SYM_ANNOTATIONS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
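
A note on the new select: ARCH_USE_SYM_ANNOTATIONS tells include/linux/linkage.h not to provide the deprecated annotation macros at all, so any stray ENTRY()/ENDPROC() left in (or later added to) arm64 assembly fails the build instead of quietly reintroducing the old style. A minimal sketch of the mechanism, from memory of the v5.7-era header rather than a verbatim quote:

/* Hypothetical excerpt in the spirit of include/linux/linkage.h: the
 * deprecated aliases only exist while the arch has NOT selected
 * ARCH_USE_SYM_ANNOTATIONS. */
#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
#define ENTRY(name)	SYM_FUNC_START(name)
#define ENDPROC(name)	SYM_FUNC_END(name)
#define END(name)	SYM_END(name, SYM_T_NONE)
#endif /* !CONFIG_ARCH_USE_SYM_ANNOTATIONS */
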
30 changes: 27 additions & 3 deletions arch/arm64/include/asm/insn.h
@@ -39,13 +39,37 @@ enum aarch64_insn_encoding_class {
* system instructions */
};

-enum aarch64_insn_hint_op {
+enum aarch64_insn_hint_cr_op {
AARCH64_INSN_HINT_NOP = 0x0 << 5,
AARCH64_INSN_HINT_YIELD = 0x1 << 5,
AARCH64_INSN_HINT_WFE = 0x2 << 5,
AARCH64_INSN_HINT_WFI = 0x3 << 5,
AARCH64_INSN_HINT_SEV = 0x4 << 5,
AARCH64_INSN_HINT_SEVL = 0x5 << 5,

+AARCH64_INSN_HINT_XPACLRI = 0x07 << 5,
+AARCH64_INSN_HINT_PACIA_1716 = 0x08 << 5,
+AARCH64_INSN_HINT_PACIB_1716 = 0x0A << 5,
+AARCH64_INSN_HINT_AUTIA_1716 = 0x0C << 5,
+AARCH64_INSN_HINT_AUTIB_1716 = 0x0E << 5,
+AARCH64_INSN_HINT_PACIAZ = 0x18 << 5,
+AARCH64_INSN_HINT_PACIASP = 0x19 << 5,
+AARCH64_INSN_HINT_PACIBZ = 0x1A << 5,
+AARCH64_INSN_HINT_PACIBSP = 0x1B << 5,
+AARCH64_INSN_HINT_AUTIAZ = 0x1C << 5,
+AARCH64_INSN_HINT_AUTIASP = 0x1D << 5,
+AARCH64_INSN_HINT_AUTIBZ = 0x1E << 5,
+AARCH64_INSN_HINT_AUTIBSP = 0x1F << 5,
+
+AARCH64_INSN_HINT_ESB = 0x10 << 5,
+AARCH64_INSN_HINT_PSB = 0x11 << 5,
+AARCH64_INSN_HINT_TSB = 0x12 << 5,
+AARCH64_INSN_HINT_CSDB = 0x14 << 5,
+
+AARCH64_INSN_HINT_BTI = 0x20 << 5,
+AARCH64_INSN_HINT_BTIC = 0x22 << 5,
+AARCH64_INSN_HINT_BTIJ = 0x24 << 5,
+AARCH64_INSN_HINT_BTIJC = 0x26 << 5,
};

enum aarch64_insn_imm_type {
@@ -344,7 +368,7 @@ __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)

#undef __AARCH64_INSN_FUNCS

-bool aarch64_insn_is_nop(u32 insn);
+bool aarch64_insn_is_steppable_hint(u32 insn);
bool aarch64_insn_is_branch_imm(u32 insn);

static inline bool aarch64_insn_is_adr_adrp(u32 insn)
@@ -370,7 +394,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_condition cond);
-u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op);
u32 aarch64_insn_gen_nop(void);
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
enum aarch64_insn_branch_type type);
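
All of the enum values above have the shape (imm << 5) because HINT encodes its 7-bit CRm:op2 immediate in bits [11:5] of the instruction; masking with 0xFE0 (as insn.c does below) yields that field still in place, ready to compare against the shifted constants. A small standalone sketch of the arithmetic — gen_hint() here is our illustrative helper taking the raw immediate, whereas the kernel's aarch64_insn_gen_hint() takes an already-shifted enum value:

#include <stdint.h>
#include <stdio.h>

#define HINT_BASE 0xD503201FU	/* HINT #0, i.e. NOP */

/* Shift a raw 7-bit CRm:op2 immediate into bits [11:5] of a HINT. */
static uint32_t gen_hint(uint32_t imm7)
{
	return HINT_BASE | (imm7 << 5);
}

int main(void)
{
	printf("NOP     = 0x%08X\n", gen_hint(0x00)); /* 0xD503201F */
	printf("PACIASP = 0x%08X\n", gen_hint(0x19)); /* 0xD503233F */
	printf("BTI c   = 0x%08X\n", gen_hint(0x22)); /* 0xD503245F */
	printf("CRm:op2 field of BTI c = 0x%03X\n", gen_hint(0x22) & 0xFE0); /* 0x440 */
	return 0;
}
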
4 changes: 2 additions & 2 deletions arch/arm64/kernel/cpu-reset.S
@@ -29,7 +29,7 @@
* branch to what would be the reset vector. It must be executed with the
* flat identity mapping.
*/
-ENTRY(__cpu_soft_restart)
+SYM_CODE_START(__cpu_soft_restart)
/* Clear sctlr_el1 flags. */
mrs x12, sctlr_el1
mov_q x13, SCTLR_ELx_FLAGS
@@ -47,6 +47,6 @@ ENTRY(__cpu_soft_restart)
mov x1, x3 // arg1
mov x2, x4 // arg2
br x8
-ENDPROC(__cpu_soft_restart)
+SYM_CODE_END(__cpu_soft_restart)

.popsection
4 changes: 2 additions & 2 deletions arch/arm64/kernel/efi-rt-wrapper.S
@@ -5,7 +5,7 @@

#include <linux/linkage.h>

-ENTRY(__efi_rt_asm_wrapper)
+SYM_FUNC_START(__efi_rt_asm_wrapper)
stp x29, x30, [sp, #-32]!
mov x29, sp

@@ -35,4 +35,4 @@ ENTRY(__efi_rt_asm_wrapper)
b.ne 0f
ret
0: b efi_handle_corrupted_x18 // tail call
-ENDPROC(__efi_rt_asm_wrapper)
+SYM_FUNC_END(__efi_rt_asm_wrapper)
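
Note that the two conversions above deliberately differ: __cpu_soft_restart becomes SYM_CODE_START/SYM_CODE_END because it is not a normal C-callable function (it runs from the flat identity mapping and never returns through the usual ABI), while __efi_rt_asm_wrapper becomes SYM_FUNC_START/SYM_FUNC_END because it is a genuine function entry point. Keeping that distinction accurate is what the BTI work depends on: only SYM_FUNC_* entries are candidates for branch-target landing pads. A rough sketch of how the macros differ, assuming the v5.7-era include/linux/linkage.h (the *_LOCAL variants seen later in this diff are the same shape with local rather than global linkage):

/* Sketch, not verbatim: both families emit symbol begin/end bookkeeping;
 * the symbol type at the END marker is what distinguishes them. */
#define SYM_FUNC_START(name)		SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#define SYM_FUNC_END(name)		SYM_END(name, SYM_T_FUNC)	/* STT_FUNC */

#define SYM_CODE_START(name)		SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#define SYM_CODE_END(name)		SYM_END(name, SYM_T_NONE)	/* untyped */

#define SYM_CODE_START_LOCAL(name)	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
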
20 changes: 10 additions & 10 deletions arch/arm64/kernel/entry-fpsimd.S
@@ -16,34 +16,34 @@
*
* x0 - pointer to struct fpsimd_state
*/
-ENTRY(fpsimd_save_state)
+SYM_FUNC_START(fpsimd_save_state)
fpsimd_save x0, 8
ret
-ENDPROC(fpsimd_save_state)
+SYM_FUNC_END(fpsimd_save_state)

/*
* Load the FP registers.
*
* x0 - pointer to struct fpsimd_state
*/
-ENTRY(fpsimd_load_state)
+SYM_FUNC_START(fpsimd_load_state)
fpsimd_restore x0, 8
ret
-ENDPROC(fpsimd_load_state)
+SYM_FUNC_END(fpsimd_load_state)

#ifdef CONFIG_ARM64_SVE
-ENTRY(sve_save_state)
+SYM_FUNC_START(sve_save_state)
sve_save 0, x1, 2
ret
-ENDPROC(sve_save_state)
+SYM_FUNC_END(sve_save_state)

-ENTRY(sve_load_state)
+SYM_FUNC_START(sve_load_state)
sve_load 0, x1, x2, 3, x4
ret
-ENDPROC(sve_load_state)
+SYM_FUNC_END(sve_load_state)

-ENTRY(sve_get_vl)
+SYM_FUNC_START(sve_get_vl)
_sve_rdvl 0, 1
ret
-ENDPROC(sve_get_vl)
+SYM_FUNC_END(sve_get_vl)
#endif /* CONFIG_ARM64_SVE */
27 changes: 14 additions & 13 deletions arch/arm64/kernel/entry.S
@@ -727,21 +727,10 @@ el0_error_naked:
b ret_to_user
SYM_CODE_END(el0_error)

-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
-mov x0, sp // 'regs'
-bl do_notify_resume
-#ifdef CONFIG_TRACE_IRQFLAGS
-bl trace_hardirqs_on // enabled while in userspace
-#endif
-ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
-b finish_ret_to_user
/*
* "slow" syscall return path.
*/
-ret_to_user:
+SYM_CODE_START_LOCAL(ret_to_user)
disable_daif
gic_prio_kentry_setup tmp=x3
ldr x1, [tsk, #TSK_TI_FLAGS]
@@ -753,7 +742,19 @@ finish_ret_to_user:
bl stackleak_erase
#endif
kernel_exit 0
-ENDPROC(ret_to_user)
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+work_pending:
+mov x0, sp // 'regs'
+bl do_notify_resume
+#ifdef CONFIG_TRACE_IRQFLAGS
+bl trace_hardirqs_on // enabled while in userspace
+#endif
+ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
+b finish_ret_to_user
+SYM_CODE_END(ret_to_user)

.popsection // .entry.text

16 changes: 8 additions & 8 deletions arch/arm64/kernel/hibernate-asm.S
@@ -65,7 +65,7 @@
* x5: physical address of a zero page that remains zero after resume
*/
.pushsection ".hibernate_exit.text", "ax"
-ENTRY(swsusp_arch_suspend_exit)
+SYM_CODE_START(swsusp_arch_suspend_exit)
/*
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
@@ -110,7 +110,7 @@ ENTRY(swsusp_arch_suspend_exit)
cbz x24, 3f /* Do we need to re-initialise EL2? */
hvc #0
3: ret
-ENDPROC(swsusp_arch_suspend_exit)
+SYM_CODE_END(swsusp_arch_suspend_exit)

/*
* Restore the hyp stub.
@@ -119,15 +119,15 @@ ENDPROC(swsusp_arch_suspend_exit)
*
* x24: The physical address of __hyp_stub_vectors
*/
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
msr vbar_el2, x24
eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)

.macro invalid_vector label
-\label:
+SYM_CODE_START_LOCAL(\label)
b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
.endm

invalid_vector el2_sync_invalid
@@ -141,7 +141,7 @@

/* el2 vectors - switch el2 here while we restore the memory image. */
.align 11
-ENTRY(hibernate_el2_vectors)
+SYM_CODE_START(hibernate_el2_vectors)
ventry el2_sync_invalid // Synchronous EL2t
ventry el2_irq_invalid // IRQ EL2t
ventry el2_fiq_invalid // FIQ EL2t
@@ -161,6 +161,6 @@ ENTRY(hibernate_el2_vectors)
ventry el1_irq_invalid // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error_invalid // Error 32-bit EL1
-END(hibernate_el2_vectors)
+SYM_CODE_END(hibernate_el2_vectors)

.popsection
20 changes: 10 additions & 10 deletions arch/arm64/kernel/hyp-stub.S
@@ -21,7 +21,7 @@

.align 11

-ENTRY(__hyp_stub_vectors)
+SYM_CODE_START(__hyp_stub_vectors)
ventry el2_sync_invalid // Synchronous EL2t
ventry el2_irq_invalid // IRQ EL2t
ventry el2_fiq_invalid // FIQ EL2t
@@ -41,11 +41,11 @@ ENTRY(__hyp_stub_vectors)
ventry el1_irq_invalid // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error_invalid // Error 32-bit EL1
-ENDPROC(__hyp_stub_vectors)
+SYM_CODE_END(__hyp_stub_vectors)

.align 11

-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
cmp x0, #HVC_SET_VECTORS
b.ne 2f
msr vbar_el2, x1
@@ -68,12 +68,12 @@ el1_sync:

9: mov x0, xzr
eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)

.macro invalid_vector label
-\label:
+SYM_CODE_START_LOCAL(\label)
b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
.endm

invalid_vector el2_sync_invalid
@@ -106,15 +106,15 @@ ENDPROC(\label)
* initialisation entry point.
*/

-ENTRY(__hyp_set_vectors)
+SYM_FUNC_START(__hyp_set_vectors)
mov x1, x0
mov x0, #HVC_SET_VECTORS
hvc #0
ret
-ENDPROC(__hyp_set_vectors)
+SYM_FUNC_END(__hyp_set_vectors)

-ENTRY(__hyp_reset_vectors)
+SYM_FUNC_START(__hyp_reset_vectors)
mov x0, #HVC_RESET_VECTORS
hvc #0
ret
-ENDPROC(__hyp_reset_vectors)
+SYM_FUNC_END(__hyp_reset_vectors)
32 changes: 22 additions & 10 deletions arch/arm64/kernel/insn.c
@@ -51,21 +51,33 @@ enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

-/* NOP is an alias of HINT */
-bool __kprobes aarch64_insn_is_nop(u32 insn)
+bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
if (!aarch64_insn_is_hint(insn))
return false;

switch (insn & 0xFE0) {
-case AARCH64_INSN_HINT_YIELD:
-case AARCH64_INSN_HINT_WFE:
-case AARCH64_INSN_HINT_WFI:
-case AARCH64_INSN_HINT_SEV:
-case AARCH64_INSN_HINT_SEVL:
-return false;
-default:
+case AARCH64_INSN_HINT_XPACLRI:
+case AARCH64_INSN_HINT_PACIA_1716:
+case AARCH64_INSN_HINT_PACIB_1716:
+case AARCH64_INSN_HINT_AUTIA_1716:
+case AARCH64_INSN_HINT_AUTIB_1716:
+case AARCH64_INSN_HINT_PACIAZ:
+case AARCH64_INSN_HINT_PACIASP:
+case AARCH64_INSN_HINT_PACIBZ:
+case AARCH64_INSN_HINT_PACIBSP:
+case AARCH64_INSN_HINT_AUTIAZ:
+case AARCH64_INSN_HINT_AUTIASP:
+case AARCH64_INSN_HINT_AUTIBZ:
+case AARCH64_INSN_HINT_AUTIBSP:
+case AARCH64_INSN_HINT_BTI:
+case AARCH64_INSN_HINT_BTIC:
+case AARCH64_INSN_HINT_BTIJ:
+case AARCH64_INSN_HINT_BTIJC:
+case AARCH64_INSN_HINT_NOP:
return true;
+default:
+return false;
}
}

@@ -574,7 +586,7 @@ u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
offset >> 2);
}

-u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
return aarch64_insn_get_hint_value() | op;
}
2 changes: 1 addition & 1 deletion arch/arm64/kernel/probes/decode-insn.c
@@ -46,7 +46,7 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
* except for the NOP case.
*/
if (aarch64_insn_is_hint(insn))
-return aarch64_insn_is_nop(insn);
+return aarch64_insn_is_steppable_hint(insn);

return true;
}
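
The net effect of the for-next/insn changes is a polarity flip: rather than treating every HINT outside the wait/yield family as single-steppable, the kernel now steps only HINTs it positively recognises as side-effect-free (NOP, the PAC hints, and the BTI hints), and refuses anything unrecognised. A self-contained sketch for desk-checking encodings outside the kernel — illustrative only, with an abbreviated allow-list and locally defined helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* HINT-space match, following the __AARCH64_INSN_FUNCS(hint, ...) pattern. */
static bool is_hint(uint32_t insn)
{
	return (insn & 0xFFFFF01F) == 0xD503201F;
}

/* Abbreviated stand-in for aarch64_insn_is_steppable_hint() above. */
static bool is_steppable_hint(uint32_t insn)
{
	if (!is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case 0x00 << 5:	/* NOP */
	case 0x19 << 5:	/* PACIASP */
	case 0x1D << 5:	/* AUTIASP */
	case 0x22 << 5:	/* BTI c */
		return true;
	default:	/* unrecognised hints: assume unsafe to step */
		return false;
	}
}

int main(void)
{
	printf("NOP   0xD503201F -> %d\n", is_steppable_hint(0xD503201F)); /* 1 */
	printf("WFE   0xD503205F -> %d\n", is_steppable_hint(0xD503205F)); /* 0 */
	printf("BTI c 0xD503245F -> %d\n", is_steppable_hint(0xD503245F)); /* 1 */
	printf("B     0x14000000 -> %d\n", is_steppable_hint(0x14000000)); /* 0 */
	return 0;
}
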
4 changes: 2 additions & 2 deletions arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -61,7 +61,7 @@
ldp x28, x29, [sp, #S_X28]
.endm

-ENTRY(kretprobe_trampoline)
+SYM_CODE_START(kretprobe_trampoline)
sub sp, sp, #S_FRAME_SIZE

save_all_base_regs
@@ -79,4 +79,4 @@ ENTRY(kretprobe_trampoline)
add sp, sp, #S_FRAME_SIZE
ret

-ENDPROC(kretprobe_trampoline)
+SYM_CODE_END(kretprobe_trampoline)
(Diff truncated: the remaining 16 changed files are not shown here.)
