Skip to content

Commit

Permalink
arm64: entry: convert IRQ+FIQ handlers to C
Browse files Browse the repository at this point in the history
For various reasons we'd like to convert the bulk of arm64's exception
triage logic to C. As a step towards that, this patch converts the EL1
and EL0 IRQ+FIQ triage logic to C.

Separate C functions are added for the native and compat cases so that
in subsequent patches we can handle native/compat differences in C.

Since the triage functions can now call arm64_apply_bp_hardening()
directly, the do_el0_irq_bp_hardening() wrapper function is removed.

Since the user_exit_irqoff macro is now unused, it is removed. The
user_enter_irqoff macro is still used by the ret_to_user code, and
cannot be removed at this time.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-8-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
  • Loading branch information
Mark Rutland authored and Will Deacon committed Jun 7, 2021
1 parent f804948 commit 064dbfb
Show file tree
Hide file tree
Showing 5 changed files with 110 additions and 108 deletions.
8 changes: 6 additions & 2 deletions arch/arm64/include/asm/exception.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,18 @@ static inline u32 disr_to_esr(u64 disr)
}

asmlinkage void el1_sync_handler(struct pt_regs *regs);
asmlinkage void el1_irq_handler(struct pt_regs *regs);
asmlinkage void el1_fiq_handler(struct pt_regs *regs);
asmlinkage void el1_error_handler(struct pt_regs *regs);
asmlinkage void el0_sync_handler(struct pt_regs *regs);
asmlinkage void el0_irq_handler(struct pt_regs *regs);
asmlinkage void el0_fiq_handler(struct pt_regs *regs);
asmlinkage void el0_error_handler(struct pt_regs *regs);
asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
asmlinkage void el0_irq_compat_handler(struct pt_regs *regs);
asmlinkage void el0_fiq_compat_handler(struct pt_regs *regs);
asmlinkage void el0_error_compat_handler(struct pt_regs *regs);

asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
void (*func)(struct pt_regs *));
asmlinkage void enter_from_user_mode(void);
Expand Down
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/processor.h
Original file line number Diff line number Diff line change
Expand Up @@ -257,8 +257,6 @@ void set_task_sctlr_el1(u64 sctlr);
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);

asmlinkage void arm64_preempt_schedule_irq(void);

#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

Expand Down
93 changes: 90 additions & 3 deletions arch/arm64/kernel/entry-common.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>

/*
Expand Down Expand Up @@ -101,23 +103,23 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
__nmi_exit();
}

asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
/*
 * EL1 IRQ/FIQ entry accounting. With pseudo-NMI support enabled, an
 * interrupt taken while the interrupted context had interrupts masked
 * (per the saved regs) must be a pseudo-NMI, so it is accounted via
 * arm64_enter_nmi(); otherwise perform the normal kernel-mode entry
 * accounting. Must be noinstr: runs before entry state is established.
 */
static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_enter_nmi(regs);
else
enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
/*
 * Mirror of enter_el1_irq_or_nmi(): undo the NMI or kernel-mode entry
 * accounting, selected by the same interrupts_enabled(regs) test so the
 * exit path always matches the entry path taken for these regs.
 */
static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_exit_nmi(regs);
else
exit_to_kernel_mode(regs);
}

asmlinkage void __sched arm64_preempt_schedule_irq(void)
static void __sched arm64_preempt_schedule_irq(void)
{
lockdep_assert_irqs_disabled();

Expand All @@ -142,6 +144,18 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void)
preempt_schedule_irq();
}

/*
 * Invoke @handler for the interrupt described by @regs. If we are still
 * on the task (thread) stack, switch to the per-CPU IRQ stack first via
 * the asm helper call_on_irq_stack(); otherwise call the handler
 * directly (NOTE(review): presumably we are already on the IRQ or
 * overflow stack in that case -- confirm against stacktrace.h).
 */
static void do_interrupt_handler(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
if (on_thread_stack())
call_on_irq_stack(regs, handler);
else
handler(regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

Expand Down Expand Up @@ -308,6 +322,36 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
}
}

/*
 * Common triage for IRQ and FIQ taken from EL1:
 *  - set PSTATE.DAIF to DAIF_PROCCTX_NOIRQ (NOTE(review): presumably
 *    keeps IRQ/FIQ masked while unmasking debug/SError -- confirm
 *    against asm/daifflags.h);
 *  - perform IRQ-vs-pseudo-NMI entry accounting;
 *  - run @handler, switching to the IRQ stack if needed;
 *  - preempt if allowed and needed, then undo the entry accounting.
 */
static void noinstr el1_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

enter_el1_irq_or_nmi(regs);
do_interrupt_handler(regs, handler);

/*
 * Note: thread_info::preempt_count includes both thread_info::count
 * and thread_info::need_resched, and is not equivalent to
 * preempt_count().
 */
if (IS_ENABLED(CONFIG_PREEMPTION) &&
READ_ONCE(current_thread_info()->preempt_count) == 0)
arm64_preempt_schedule_irq();

exit_el1_irq_or_nmi(regs);
}

/* C entry point for IRQs taken from EL1, called from entry.S. */
asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs)
{
el1_interrupt(regs, handle_arch_irq);
}

/* C entry point for FIQs taken from EL1, called from entry.S. */
asmlinkage void noinstr el1_fiq_handler(struct pt_regs *regs)
{
el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1_error_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
Expand Down Expand Up @@ -507,6 +551,39 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
}
}

/*
 * Common triage for IRQ and FIQ taken from EL0:
 *  - perform the user->kernel entry accounting (enter_from_user_mode());
 *  - set PSTATE.DAIF for kernel context with interrupts masked;
 *  - apply branch-predictor hardening when bit 55 of the interrupted PC
 *    is set (NOTE(review): presumably a PC that cannot be a valid user
 *    VA, indicating possible branch-predictor abuse -- confirm against
 *    the Spectre-v2 mitigation docs);
 *  - run @handler, switching to the IRQ stack if needed.
 */
static void noinstr el0_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *))
{
enter_from_user_mode();

write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

if (regs->pc & BIT(55))
arm64_apply_bp_hardening();

do_interrupt_handler(regs, handler);
}

/* Shared EL0 IRQ body, used by both native and compat handlers. */
static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
el0_interrupt(regs, handle_arch_irq);
}

/* C entry point for IRQs taken from native (AArch64) EL0. */
asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs)
{
__el0_irq_handler_common(regs);
}

/* Shared EL0 FIQ body, used by both native and compat handlers. */
static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
el0_interrupt(regs, handle_arch_fiq);
}

/* C entry point for FIQs taken from native (AArch64) EL0. */
asmlinkage void noinstr el0_fiq_handler(struct pt_regs *regs)
{
__el0_fiq_handler_common(regs);
}

static void __el0_error_handler_common(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
Expand Down Expand Up @@ -583,6 +660,16 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
}
}

/* C entry point for IRQs taken from compat (AArch32) EL0. */
asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs)
{
__el0_irq_handler_common(regs);
}

/* C entry point for FIQs taken from compat (AArch32) EL0. */
asmlinkage void noinstr el0_fiq_compat_handler(struct pt_regs *regs)
{
__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs)
{
__el0_error_handler_common(regs);
Expand Down
108 changes: 14 additions & 94 deletions arch/arm64/kernel/entry.S
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,6 @@
* Context tracking and irqflag tracing need to instrument transitions between
* user and kernel mode.
*/
.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl enter_from_user_mode
#endif
.endm

.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl exit_to_user_mode
Expand Down Expand Up @@ -486,63 +480,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
SYM_CODE_END(__swpan_exit_el0)
#endif

.macro irq_stack_entry
mov x19, sp // preserve the original sp
#ifdef CONFIG_SHADOW_CALL_STACK
mov x24, scs_sp // preserve the original shadow stack
#endif

/*
* Compare sp with the base of the task stack.
* If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
* and should switch to the irq stack.
*/
ldr x25, [tsk, TSK_STACK]
eor x25, x25, x19
and x25, x25, #~(THREAD_SIZE - 1)
cbnz x25, 9998f

ldr_this_cpu x25, irq_stack_ptr, x26
mov x26, #IRQ_STACK_SIZE
add x26, x25, x26

/* switch to the irq stack */
mov sp, x26

#ifdef CONFIG_SHADOW_CALL_STACK
/* also switch to the irq shadow stack */
ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
#endif

9998:
.endm

/*
* The callee-saved regs (x19-x29) should be preserved between
* irq_stack_entry and irq_stack_exit, but note that kernel_entry
* uses x20-x23 to store data for later use.
*/
.macro irq_stack_exit
mov sp, x19
#ifdef CONFIG_SHADOW_CALL_STACK
mov scs_sp, x24
#endif
.endm

/* GPRs used by entry code */
tsk .req x28 // current thread_info

/*
* Interrupt handling.
*/
.macro irq_handler, handler:req
ldr_l x1, \handler
mov x0, sp
irq_stack_entry
blr x1
irq_stack_exit
.endm

.macro gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
Expand All @@ -552,35 +495,6 @@ tsk .req x28 // current thread_info
#endif
.endm

.macro el1_interrupt_handler, handler:req
enable_da

mov x0, sp
bl enter_el1_irq_or_nmi

irq_handler \handler

#ifdef CONFIG_PREEMPTION
ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz x24, 1f // preempt count != 0
bl arm64_preempt_schedule_irq // irq en/disable is done inside
1:
#endif

mov x0, sp
bl exit_el1_irq_or_nmi
.endm

.macro el0_interrupt_handler, handler:req
user_exit_irqoff
enable_da

tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
1:
irq_handler \handler
.endm

.text

/*
Expand Down Expand Up @@ -704,13 +618,15 @@ SYM_CODE_END(el1_sync)
.align 6
// EL1 IRQ vector: save the interrupted EL1 state, pass a pointer to the
// saved pt_regs (sp) in x0 to the C triage code, then restore and eret.
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
mov x0, sp
bl el1_irq_handler
kernel_exit 1
SYM_CODE_END(el1_irq)

// EL1 FIQ vector: identical shape to el1_irq, but dispatches to the C
// FIQ triage entry point.
SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
kernel_entry 1
mov x0, sp
bl el1_fiq_handler
kernel_exit 1
SYM_CODE_END(el1_fiq)

Expand All @@ -737,12 +653,16 @@ SYM_CODE_END(el0_sync_compat)
.align 6
// Compat (AArch32) EL0 IRQ vector: save the EL0 state (32-bit variant),
// call the compat C triage entry point with pt_regs in x0, then take
// the common return-to-user path.
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
kernel_entry 0, 32
mov x0, sp
bl el0_irq_compat_handler
b ret_to_user
SYM_CODE_END(el0_irq_compat)

// Compat (AArch32) EL0 FIQ vector: same shape as el0_irq_compat, but
// dispatches to the compat FIQ triage entry point.
SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
kernel_entry 0, 32
mov x0, sp
bl el0_fiq_compat_handler
b ret_to_user
SYM_CODE_END(el0_fiq_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
Expand All @@ -756,15 +676,15 @@ SYM_CODE_END(el0_error_compat)
.align 6
// Native (AArch64) EL0 IRQ vector: save the EL0 state, call the C
// triage entry point with pt_regs in x0, then return to user.
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
mov x0, sp
bl el0_irq_handler
b ret_to_user
SYM_CODE_END(el0_irq)

// Native (AArch64) EL0 FIQ vector: same shape as el0_irq, but
// dispatches to the C FIQ triage entry point.
SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
kernel_entry 0
mov x0, sp
bl el0_fiq_handler
b ret_to_user
SYM_CODE_END(el0_fiq)

Expand Down
7 changes: 0 additions & 7 deletions arch/arm64/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -836,13 +836,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);

void do_el0_irq_bp_hardening(void)
{
/* PC has already been checked in entry.S */
arm64_apply_bp_hardening();
}
NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);

void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
Expand Down

0 comments on commit 064dbfb

Please sign in to comment.