x86/entry/64: Interleave XOR register clearing with PUSH instructions
As is already done for syscalls, interleave XOR with PUSH instructions
for exceptions/interrupts, in order to minimize the cost of the
additional instructions required for register clearing.
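
Concretely, for one pair of registers the two orderings look like this (an illustrative sketch in GNU as syntax, not lines taken from the patch):

	/* clustered: all saves first, then a burst of clears */
	pushq %r8
	pushq %r9
	xorq %r8, %r8
	xorq %r9, %r9

	/* interleaved: each clear issues right behind its save */
	pushq %r8
	xorq %r8, %r8
	pushq %r9
	xorq %r9, %r9

Both orderings leave identical values in pt_regs; the interleaved form simply hands the decoder a better instruction mix, since XOR-zeroing is a dependency-breaking idiom that many modern x86 cores eliminate at register rename.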

Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-4-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Dominik Brodowski authored and Ingo Molnar committed Feb 13, 2018
commit f7bafa2 (1 parent: 502af0d)
Showing 2 changed files with 40 additions and 30 deletions.
arch/x86/entry/calling.h: 19 additions & 21 deletions
@@ -101,44 +101,42 @@ For 32-bit we have the following conventions - kernel is built with
addq $-(15*8), %rsp
.endm

-.macro SAVE_REGS offset=0
+.macro SAVE_AND_CLEAR_REGS offset=0
+	/*
+	 * Save registers and sanitize registers of values that a
+	 * speculation attack might otherwise want to exploit. The
+	 * lower registers are likely clobbered well before they
+	 * could be put to use in a speculative execution gadget.
+	 * Interleave XOR with PUSH for better uop scheduling:
+	 */
	movq %rdi, 14*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdx, 12*8+\offset(%rsp)
	movq %rcx, 11*8+\offset(%rsp)
	movq %rax, 10*8+\offset(%rsp)
	movq %r8, 9*8+\offset(%rsp)
+	xorq %r8, %r8		/* nospec r8 */
	movq %r9, 8*8+\offset(%rsp)
+	xorq %r9, %r9		/* nospec r9 */
	movq %r10, 7*8+\offset(%rsp)
+	xorq %r10, %r10		/* nospec r10 */
	movq %r11, 6*8+\offset(%rsp)
+	xorq %r11, %r11		/* nospec r11 */
	movq %rbx, 5*8+\offset(%rsp)
+	xorl %ebx, %ebx		/* nospec rbx */
	movq %rbp, 4*8+\offset(%rsp)
+	xorl %ebp, %ebp		/* nospec rbp */
	movq %r12, 3*8+\offset(%rsp)
+	xorq %r12, %r12		/* nospec r12 */
	movq %r13, 2*8+\offset(%rsp)
+	xorq %r13, %r13		/* nospec r13 */
	movq %r14, 1*8+\offset(%rsp)
+	xorq %r14, %r14		/* nospec r14 */
	movq %r15, 0*8+\offset(%rsp)
+	xorq %r15, %r15		/* nospec r15 */
	UNWIND_HINT_REGS offset=\offset
.endm

-/*
- * Sanitize registers of values that a speculation attack
- * might otherwise want to exploit. The lower registers are
- * likely clobbered well before they could be put to use in
- * a speculative execution gadget:
- */
-.macro CLEAR_REGS_NOSPEC
-	xorl %ebp, %ebp
-	xorl %ebx, %ebx
-	xorq %r8, %r8
-	xorq %r9, %r9
-	xorq %r10, %r10
-	xorq %r11, %r11
-	xorq %r12, %r12
-	xorq %r13, %r13
-	xorq %r14, %r14
-	xorq %r15, %r15
-.endm

.macro POP_REGS pop_rdi=1 skip_r11rcx=0
popq %r15
popq %r14
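
A note on the two zeroing forms used above: a write to a 32-bit register zero-extends into the full 64-bit register, so the shorter xorl encoding suffices for the legacy registers, while r8..r15 require a REX prefix in any encoding (an illustrative sketch; byte counts assume GNU as defaults):

	xorl %ebx, %ebx		/* 2 bytes: 31 db; zero-extends to %rbx */
	xorq %rbx, %rbx		/* 3 bytes: 48 31 db; same effect, longer */
	xorq %r8, %r8		/* 3 bytes: 4d 31 c0; REX unavoidable for r8..r15 */

All three are recognized zeroing idioms, so they also break any false dependency on the register's old value.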
@@ -177,7 +175,7 @@ For 32-bit we have the following conventions - kernel is built with
* is just setting the LSB, which makes it an invalid stack address and is also
* a signal to the unwinder that it's a pt_regs pointer in disguise.
*
- * NOTE: This macro must be used *after* SAVE_REGS because it corrupts
+ * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts
* the original rbp.
*/
.macro ENCODE_FRAME_POINTER ptregs_offset=0
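
For context, the body of this macro in calling.h at the time was essentially the following (quoted from memory, so treat it as a sketch rather than the authoritative text); the LSB trick described above is just the +1 in the lea:

	.macro ENCODE_FRAME_POINTER ptregs_offset=0
	#ifdef CONFIG_FRAME_POINTER
		leaq 1+\ptregs_offset(%rsp), %rbp
	#endif
	.endm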
arch/x86/entry/entry_64.S: 21 additions & 9 deletions
@@ -565,8 +565,7 @@ END(irq_entries_start)
1:

ALLOC_PT_GPREGS_ON_STACK
-	SAVE_REGS
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS
ENCODE_FRAME_POINTER

testb $3, CS(%rsp)
@@ -1114,8 +1113,7 @@ ENTRY(xen_failsafe_callback)
UNWIND_HINT_IRET_REGS
pushq $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK
-	SAVE_REGS
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
jmp error_exit
END(xen_failsafe_callback)
@@ -1159,8 +1157,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
ENTRY(paranoid_entry)
UNWIND_HINT_FUNC
cld
-	SAVE_REGS 8
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS 8
ENCODE_FRAME_POINTER 8
movl $1, %ebx
movl $MSR_GS_BASE, %ecx
@@ -1211,8 +1208,7 @@ END(paranoid_exit)
ENTRY(error_entry)
UNWIND_HINT_FUNC
cld
-	SAVE_REGS 8
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS 8
ENCODE_FRAME_POINTER 8
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1399,18 +1395,34 @@ ENTRY(nmi)
pushq (%rdx) /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq %rax /* pt_regs->ax */
+	/*
+	 * Sanitize registers of values that a speculation attack
+	 * might otherwise want to exploit. The lower registers are
+	 * likely clobbered well before they could be put to use in
+	 * a speculative execution gadget. Interleave XOR with PUSH
+	 * for better uop scheduling:
+	 */
	pushq %r8		/* pt_regs->r8 */
+	xorq %r8, %r8		/* nospec r8 */
	pushq %r9		/* pt_regs->r9 */
+	xorq %r9, %r9		/* nospec r9 */
	pushq %r10		/* pt_regs->r10 */
+	xorq %r10, %r10		/* nospec r10 */
	pushq %r11		/* pt_regs->r11 */
+	xorq %r11, %r11		/* nospec r11 */
	pushq %rbx		/* pt_regs->rbx */
+	xorl %ebx, %ebx		/* nospec rbx */
	pushq %rbp		/* pt_regs->rbp */
+	xorl %ebp, %ebp		/* nospec rbp */
	pushq %r12		/* pt_regs->r12 */
+	xorq %r12, %r12		/* nospec r12 */
	pushq %r13		/* pt_regs->r13 */
+	xorq %r13, %r13		/* nospec r13 */
	pushq %r14		/* pt_regs->r14 */
+	xorq %r14, %r14		/* nospec r14 */
	pushq %r15		/* pt_regs->r15 */
+	xorq %r15, %r15		/* nospec r15 */
	UNWIND_HINT_REGS
-	CLEAR_REGS_NOSPEC
ENCODE_FRAME_POINTER

/*
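Note that the NMI path cannot reuse SAVE_AND_CLEAR_REGS here: it builds pt_regs top-down with PUSH instructions (partly re-reading the interrupted context through %rdx, as in the pushq (%rdx) lines above) rather than MOVing into a preallocated frame, so the same XOR interleaving is open-coded.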
