x86/entry/64: Remove IRQ stack switching ASM
No more users.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20200521202120.021462159@linutronix.de
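
Editor's note: the macros removed here became dead code because earlier patches in the same series (see the Link above) moved the IRQ-stack switch out of the entry ASM and into C helpers (run_on_irqstack_cond() backed by a small assembly call thunk in this kernel version). The snippet below is a user-space sketch of the core idea only — save RSP, point it at another stack, call the handler, restore — not the kernel implementation; every name in it is invented for the example.

/*
 * User-space illustration (not kernel code): run a function on a
 * separate stack by swapping RSP around the call.  All names here are
 * invented for the example.  x86-64, System V ABI, GCC/Clang inline asm.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void handler(void *arg)
{
	int probe;

	printf("handler: arg=\"%s\", a local lives at %p\n",
	       (char *)arg, (void *)&probe);
}

/* Run func(arg) with RSP pointing at stack_top (16-byte aligned). */
static void call_on_stack(void *stack_top, void (*func)(void *), void *arg)
{
	register void *rdi asm("rdi") = arg;	/* first argument register */

	asm volatile("movq %%rsp, %%rbx\n\t"	/* save the current RSP    */
		     "movq %[stk], %%rsp\n\t"	/* switch to the new stack */
		     "call *%[fn]\n\t"		/* func(arg)               */
		     "movq %%rbx, %%rsp"	/* switch back             */
		     : "+r" (rdi)
		     : [stk] "r" (stack_top), [fn] "r" (func)
		     : "rbx", "rax", "rcx", "rdx", "rsi",
		       "r8", "r9", "r10", "r11", "memory", "cc");
}

int main(void)
{
	enum { STACK_SIZE = 64 * 1024 };
	unsigned char *stack = malloc(STACK_SIZE);
	/* Top of the alternate stack, 16-byte aligned as the ABI expects. */
	void *top = (void *)(((uintptr_t)stack + STACK_SIZE) & ~(uintptr_t)15);
	int probe;

	printf("main:    a local lives at %p\n", (void *)&probe);
	call_on_stack(top, handler, "hello from the alternate stack");
	free(stack);
	return 0;
}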
Thomas Gleixner committed Jun 11, 2020
1 parent 75da04f commit e3e5c64
Showing 1 changed file with 0 additions and 96 deletions.
arch/x86/entry/entry_64.S: 0 additions & 96 deletions
@@ -370,102 +370,6 @@ SYM_CODE_END(ret_from_fork)
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
	DEBUG_ENTRY_ASSERT_IRQS_OFF

	.if \save_ret
	/*
	 * If save_ret is set, the original stack contains one additional
	 * entry -- the return address. Therefore, move the address one
	 * entry below %rsp to \old_rsp.
	 */
	leaq	8(%rsp), \old_rsp
	.else
	movq	%rsp, \old_rsp
	.endif

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage. This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack. For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(hardirq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here. Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif

	.if \save_ret
	/*
	 * Push the return address to the stack. This return address can
	 * be found at the "real" original RSP, which was offset by 8 at
	 * the beginning of this macro.
	 */
	pushq	-8(\old_rsp)
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm

/**
* idtentry_body - Macro to emit code calling the C function
* @cfunc: C function to be called
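
Editor's note: for reference, the bookkeeping the removed ENTER_IRQ_STACK/LEAVE_IRQ_STACK pair implemented can be written down in plain C. The program below is an illustrative simulation only (RSP is modeled as an ordinary pointer, and names such as IRQ_STACK_WORDS are invented); it is not kernel code. It demonstrates the two invariants the comments in the diff describe: irq_count == -1 means the IRQ stack is free, and the top word of the IRQ stack always links back to the interrupted stack.

/*
 * Simulation (not kernel code) of the bookkeeping done by the removed
 * ENTER_IRQ_STACK/LEAVE_IRQ_STACK macros.  RSP is modeled as a plain
 * pointer; all names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define IRQ_STACK_WORDS 512

static uint64_t irq_stack[IRQ_STACK_WORDS];
static uint64_t *const hardirq_stack_ptr = &irq_stack[IRQ_STACK_WORDS];
static int irq_count = -1;	/* -1: IRQ stack free, >= 0: in use */
static uint64_t *rsp;		/* simulated stack pointer */

static void push(uint64_t val) { *--rsp = val; }
static uint64_t pop(void) { return *rsp++; }

/* ENTER_IRQ_STACK: claim the IRQ stack (or nest) and save the old RSP. */
static void enter_irq_stack(void)
{
	uint64_t *old_rsp = rsp;

	if (++irq_count == 0) {
		/*
		 * Outermost entry: write the unwinder back-link into the
		 * top word of the IRQ stack before switching, then move
		 * RSP onto the IRQ stack.
		 */
		irq_stack[IRQ_STACK_WORDS - 1] = (uint64_t)(uintptr_t)old_rsp;
		rsp = hardirq_stack_ptr;
	}
	/* Nested or not, push the old RSP so LEAVE can restore it. */
	push((uint64_t)(uintptr_t)old_rsp);
}

/* LEAVE_IRQ_STACK: restore the saved RSP first, then release the stack. */
static void leave_irq_stack(void)
{
	rsp = (uint64_t *)(uintptr_t)pop();
	irq_count--;
}

int main(void)
{
	uint64_t task_stack[64];

	rsp = &task_stack[64];
	printf("task stack:        %p  irq_count=%d\n", (void *)rsp, irq_count);

	enter_irq_stack();	/* outermost interrupt: switches stacks  */
	printf("on IRQ stack:      %p  irq_count=%d\n", (void *)rsp, irq_count);

	enter_irq_stack();	/* nested interrupt: stays on IRQ stack  */
	printf("nested interrupt:  %p  irq_count=%d\n", (void *)rsp, irq_count);

	leave_irq_stack();
	leave_irq_stack();
	printf("back on task:      %p  irq_count=%d\n", (void *)rsp, irq_count);
	return 0;
}

Running it shows the nested entry staying on the IRQ stack while irq_count tracks the nesting depth, and the final leave restoring the original task stack pointer.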
