From ba5ca5e5e6a1d55923e88b4a83da452166f5560e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 11 Aug 2023 08:52:55 -0700 Subject: [PATCH 01/18] x86/retpoline: Don't clobber RFLAGS during srso_safe_ret() Use LEA instead of ADD when adjusting %rsp in srso_safe_ret{,_alias}() so as to avoid clobbering flags. Drop one of the INT3 instructions to account for the LEA consuming one more byte than the ADD. KVM's emulator makes indirect calls into a jump table of sorts, where the destination of each call is a small blob of code that performs fast emulation by executing the target instruction with fixed operands. E.g. to emulate ADC, fastop() invokes adcb_al_dl(): adcb_al_dl: <+0>: adc %dl,%al <+2>: jmp <__x86_return_thunk> A major motivation for doing fast emulation is to leverage the CPU to handle consumption and manipulation of arithmetic flags, i.e. RFLAGS is both an input and output to the target of the call. fastop() collects the RFLAGS result by pushing RFLAGS onto the stack and popping them back into a variable (held in %rdi in this case): asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n" <+71>: mov 0xc0(%r8),%rdx <+78>: mov 0x100(%r8),%rcx <+85>: push %rdi <+86>: popf <+87>: call *%rsi <+89>: nop <+90>: nop <+91>: nop <+92>: pushf <+93>: pop %rdi and then propagating the arithmetic flags into the vCPU's emulator state: ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); <+64>: and $0xfffffffffffff72a,%r9 <+94>: and $0x8d5,%edi <+109>: or %rdi,%r9 <+122>: mov %r9,0x10(%r8) The failures can be most easily reproduced by running the "emulator" test in KVM-Unit-Tests. If you're feeling a bit of deja vu, see commit b63f20a778c8 ("x86/retpoline: Don't clobber RFLAGS during CALL_NOSPEC on i386"). In addition, this breaks booting of clang-compiled guest on a gcc-compiled host where the host contains the %rsp-modifying SRSO mitigations. [ bp: Massage commit message, extend, remove addresses. ] Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Closes: https://lore.kernel.org/all/de474347-122d-54cd-eabf-9dcc95ab9eae@amd.com Reported-by: Srikanth Aithal Reported-by: Nathan Chancellor Signed-off-by: Sean Christopherson Signed-off-by: Borislav Petkov (AMD) Tested-by: Nathan Chancellor Cc: stable@vger.kernel.org Link: https://lore.kernel.org/20230810013334.GA5354@dev-arch.thelio-3990X/ Link: https://lore.kernel.org/r/20230811155255.250835-1-seanjc@google.com --- arch/x86/lib/retpoline.S | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 2cff585f22f29..132cedbf9e571 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -164,7 +164,7 @@ __EXPORT_THUNK(srso_untrain_ret_alias) /* Needs a definition for the __x86_return_thunk alternative below. */ SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) #ifdef CONFIG_CPU_SRSO - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC #endif ANNOTATE_UNRET_SAFE @@ -239,7 +239,7 @@ __EXPORT_THUNK(zen_untrain_ret) * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() * above. 
On kernel entry, srso_untrain_ret() is executed which is a * - * movabs $0xccccccc308c48348,%rax + * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and @@ -252,11 +252,10 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) .byte 0x48, 0xb8 SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 - int3 lfence call srso_safe_ret int3 From f58d6fbcb7c848b7f2469be339bc571f2e9d245b Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 11 Aug 2023 23:38:24 +0200 Subject: [PATCH 02/18] x86/CPU/AMD: Fix the DIV(0) initial fix attempt Initially, it was thought that doing an innocuous division in the #DE handler would take care to prevent any leaking of old data from the divider but by the time the fault is raised, the speculation has already advanced too far and such data could already have been used by younger operations. Therefore, do the innocuous division on every exit to userspace so that userspace doesn't see any potentially old data from integer divisions in kernel space. Do the same before VMRUN too, to protect host data from leaking into the guest too. Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de --- arch/x86/include/asm/entry-common.h | 1 + arch/x86/kernel/cpu/amd.c | 1 + arch/x86/kernel/traps.c | 2 -- arch/x86/kvm/svm/svm.c | 2 ++ 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 117903881fe43..ce8f50192ae3e 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 70f9d56f93057..7eca6a8abbb1c 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1329,3 +1329,4 @@ void noinstr amd_clear_divider(void) asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) :: "a" (0), "d" (0), "r" (1)); } +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1885326a8f659..4a817d20ce3bb 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); - - amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 03e852dedcc1b..d4bfdc607fe7f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4006,6 +4006,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in guest_state_enter_irqoff(); + amd_clear_divider(); + if (sev_es_guest(vcpu->kvm)) __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); else From e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Sun, 13 Aug 2023 12:39:34 +0200 Subject: [PATCH 03/18] x86/srso: Disable the mitigation on unaffected configurations Skip the srso cmd line parsing which is not needed on Zen1/2 with SMT 
disabled and with the proper microcode applied (latter should be the case anyway) as those are not affected. Fixes: 5a15d8348881 ("x86/srso: Tie SBPB bit setting to microcode patch detection") Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230813104517.3346-1-bp@alien8.de --- arch/x86/kernel/cpu/bugs.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d02f73c5339dd..6c04aef4b63b2 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2418,8 +2418,10 @@ static void __init srso_select_mitigation(void) * IBPB microcode has been applied. */ if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } } if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { @@ -2696,6 +2698,9 @@ static ssize_t retbleed_show_state(char *buf) static ssize_t srso_show_state(char *buf) { + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Not affected\n"); + return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); From 79cd2a11224eab86d6673fe8a11d2046ae9d2757 Mon Sep 17 00:00:00 2001 From: Petr Pavlu Date: Tue, 11 Jul 2023 11:19:51 +0200 Subject: [PATCH 04/18] x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG The linker script arch/x86/kernel/vmlinux.lds.S matches the thunk sections ".text.__x86.*" from arch/x86/lib/retpoline.S as follows: .text { [...] TEXT_TEXT [...] __indirect_thunk_start = .; *(.text.__x86.*) __indirect_thunk_end = .; [...] } Macro TEXT_TEXT references TEXT_MAIN which normally expands to only ".text". However, with CONFIG_LTO_CLANG, TEXT_MAIN becomes ".text .text.[0-9a-zA-Z_]*" which wrongly matches also the thunk sections. The output layout is then different than expected. For instance, the currently defined range [__indirect_thunk_start, __indirect_thunk_end] becomes empty. Prevent the problem by using ".." as the first separator, for example, ".text..__x86.indirect_thunk". This pattern is utilized by other explicit section names which start with one of the standard prefixes, such as ".text" or ".data", and that need to be individually selected in the linker script. 
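To see why the extra dot is enough, here is a small user-space sketch (not part of the patch) that uses fnmatch(3) as a rough stand-in for the linker's wildcard matching; the assumption is that the linker's glob handles the bracket expression the same way for this case:

    /* glob_demo.c -- illustrative only, not kernel code */
    #include <fnmatch.h>
    #include <stdio.h>

    int main(void)
    {
            /* TEXT_MAIN pattern once CONFIG_LTO_CLANG expands it */
            const char *pat = ".text.[0-9a-zA-Z_]*";

            /* old name: '_' matches the bracket expression, so TEXT_TEXT grabs it */
            printf("%s\n", fnmatch(pat, ".text.__x86.indirect_thunk", 0) ? "no match" : "match");

            /* new name: the second '.' is not in [0-9a-zA-Z_], so it stays out */
            printf("%s\n", fnmatch(pat, ".text..__x86.indirect_thunk", 0) ? "no match" : "match");

            return 0;
    }

which prints "match" for the old section name and "no match" for the new ".."-separated one.
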
[ nathan: Fix conflicts with SRSO and fold in fix issue brought up by Andrew Cooper in post-review: https://lore.kernel.org/20230803230323.1478869-1-andrew.cooper3@citrix.com ] Fixes: dc5723b02e52 ("kbuild: add support for Clang LTO") Signed-off-by: Petr Pavlu Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Nathan Chancellor Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230711091952.27944-2-petr.pavlu@suse.com --- arch/x86/kernel/vmlinux.lds.S | 8 ++++---- arch/x86/lib/retpoline.S | 8 ++++---- tools/objtool/check.c | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index ef06211bae4cc..dfb8783cb4c76 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -134,15 +134,15 @@ SECTIONS SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) - *(.text.__x86.return_thunk) + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) __indirect_thunk_end = .; #endif STATIC_CALL_TEXT ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO - *(.text.__x86.rethunk_untrain) + *(.text..__x86.rethunk_untrain) #endif ENTRY_TEXT @@ -153,7 +153,7 @@ SECTIONS * definition. */ . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); - *(.text.__x86.rethunk_safe) + *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END *(.gnu.warning) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 132cedbf9e571..8db74d811ce2e 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -13,7 +13,7 @@ #include #include - .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk .macro POLINE reg @@ -148,7 +148,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) * As a result, srso_safe_ret_alias() becomes a safe return. */ #ifdef CONFIG_CPU_SRSO - .section .text.__x86.rethunk_untrain + .section .text..__x86.rethunk_untrain SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR @@ -158,7 +158,7 @@ SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) SYM_FUNC_END(srso_untrain_ret_alias) __EXPORT_THUNK(srso_untrain_ret_alias) - .section .text.__x86.rethunk_safe + .section .text..__x86.rethunk_safe #endif /* Needs a definition for the __x86_return_thunk alternative below. */ @@ -172,7 +172,7 @@ SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) int3 SYM_FUNC_END(srso_safe_ret_alias) - .section .text.__x86.return_thunk + .section .text..__x86.return_thunk /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 8936a05f0e5ac..e2ee10ce7703f 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -389,7 +389,7 @@ static int decode_instructions(struct objtool_file *file) if (!strcmp(sec->name, ".noinstr.text") || !strcmp(sec->name, ".entry.text") || !strcmp(sec->name, ".cpuidle.text") || - !strncmp(sec->name, ".text.__x86.", 12)) + !strncmp(sec->name, ".text..__x86.", 13)) sec->noinstr = true; /* From 833fd800bf56b74d39d71d3f5936dffb3e0409c6 Mon Sep 17 00:00:00 2001 From: Petr Pavlu Date: Tue, 11 Jul 2023 11:19:52 +0200 Subject: [PATCH 05/18] x86/retpoline,kprobes: Skip optprobe check for indirect jumps with retpolines and IBT The kprobes optimization check can_optimize() calls insn_is_indirect_jump() to detect indirect jump instructions in a target function. 
If any is found, creating an optprobe is disallowed in the function because the jump could be from a jump table and could potentially land in the middle of the target optprobe. With retpolines, insn_is_indirect_jump() additionally looks for calls to indirect thunks which the compiler potentially used to replace original jumps. This extra check is however unnecessary because jump tables are disabled when the kernel is built with retpolines. The same is currently the case with IBT. Based on this observation, remove the logic to look for calls to indirect thunks and skip the check for indirect jumps altogether if the kernel is built with retpolines or IBT. Remove subsequently the symbols __indirect_thunk_start and __indirect_thunk_end which are no longer needed. Dropping this logic indirectly fixes a problem where the range [__indirect_thunk_start, __indirect_thunk_end] wrongly included also the return thunk. It caused that machines which used the return thunk as a mitigation and didn't have it patched by any alternative ended up not being able to use optprobes in any regular function. Fixes: 0b53c374b9ef ("x86/retpoline: Use -mfunction-return") Suggested-by: Peter Zijlstra (Intel) Suggested-by: Masami Hiramatsu (Google) Signed-off-by: Petr Pavlu Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Acked-by: Masami Hiramatsu (Google) Link: https://lore.kernel.org/r/20230711091952.27944-3-petr.pavlu@suse.com --- arch/x86/include/asm/nospec-branch.h | 3 --- arch/x86/kernel/kprobes/opt.c | 40 +++++++++++----------------- arch/x86/kernel/vmlinux.lds.S | 2 -- tools/perf/util/thread-stack.c | 4 +-- 4 files changed, 17 insertions(+), 32 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 3faf044569a5d..e50db53a4cb9f 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -478,9 +478,6 @@ enum ssb_mitigation { SPEC_STORE_BYPASS_SECCOMP, }; -extern char __indirect_thunk_start[]; -extern char __indirect_thunk_end[]; - static __always_inline void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) { diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 57b0037d0a996..517821b48391a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -226,7 +226,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) } /* Check whether insn is indirect jump */ -static int __insn_is_indirect_jump(struct insn *insn) +static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -260,26 +260,6 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) return (start <= target && target <= start + len); } -static int insn_is_indirect_jump(struct insn *insn) -{ - int ret = __insn_is_indirect_jump(insn); - -#ifdef CONFIG_RETPOLINE - /* - * Jump to x86_indirect_thunk_* is treated as an indirect jump. - * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with - * older gcc may use indirect jump. So we add this check instead of - * replace indirect-jump check. 
- */ - if (!ret) - ret = insn_jump_into_range(insn, - (unsigned long)__indirect_thunk_start, - (unsigned long)__indirect_thunk_end - - (unsigned long)__indirect_thunk_start); -#endif - return ret; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -334,9 +314,21 @@ static int can_optimize(unsigned long paddr) /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); - /* Check any instructions don't jump into target */ - if (insn_is_indirect_jump(&insn) || - insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, + /* + * Check any instructions don't jump into target, indirectly or + * directly. + * + * The indirect case is present to handle a code with jump + * tables. When the kernel uses retpolines, the check should in + * theory additionally look for jumps to indirect thunks. + * However, the kernel built with retpolines or IBT has jump + * tables disabled so the check can be skipped altogether. + */ + if (!IS_ENABLED(CONFIG_RETPOLINE) && + !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && + insn_is_indirect_jump(&insn)) + return 0; + if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, DISP32_SIZE)) return 0; addr += insn.length; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index dfb8783cb4c76..8e2a306e45b53 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -133,10 +133,8 @@ SECTIONS KPROBES_TEXT SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE - __indirect_thunk_start = .; *(.text..__x86.indirect_thunk) *(.text..__x86.return_thunk) - __indirect_thunk_end = .; #endif STATIC_CALL_TEXT diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c index 374d142e7390d..c6a0a27b12c2a 100644 --- a/tools/perf/util/thread-stack.c +++ b/tools/perf/util/thread-stack.c @@ -1038,9 +1038,7 @@ static int thread_stack__trace_end(struct thread_stack *ts, static bool is_x86_retpoline(const char *name) { - const char *p = strstr(name, "__x86_indirect_thunk_"); - - return p == name || !strcmp(name, "__indirect_thunk_start"); + return strstr(name, "__x86_indirect_thunk_") == name; } /* From 77f67119004296a9b2503b377d610e08b08afc2a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:27 +0200 Subject: [PATCH 06/18] x86/cpu: Fix __x86_return_thunk symbol type Commit fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") reimplemented __x86_return_thunk with a mix of SYM_FUNC_START and SYM_CODE_END, this is not a sane combination. Since nothing should ever actually 'CALL' this, make it consistently CODE. 
Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.571027074@infradead.org --- arch/x86/lib/retpoline.S | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 8db74d811ce2e..9427480cfae98 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -263,7 +263,9 @@ SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_FUNC_START(__x86_return_thunk) +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS int3 From af023ef335f13c8b579298fc432daeef609a9e60 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:28 +0200 Subject: [PATCH 07/18] x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk() vmlinux.o: warning: objtool: srso_untrain_ret() falls through to next function __x86_return_skl() vmlinux.o: warning: objtool: __x86_return_thunk() falls through to next function __x86_return_skl() This is because these functions (can) end with CALL, which objtool does not consider a terminating instruction. Therefore, replace the INT3 instruction (which is a non-fatal trap) with UD2 (which is a fatal-trap). This indicates execution will not continue past this point. Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.637802730@infradead.org --- arch/x86/lib/retpoline.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 9427480cfae98..a478eb5bbf62a 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -258,7 +258,7 @@ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) int3 lfence call srso_safe_ret - int3 + ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) @@ -268,7 +268,7 @@ SYM_CODE_START(__x86_return_thunk) ANNOTATE_NOENDBR ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS - int3 + ud2 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) From 4ae68b26c3ab5a82aa271e6e9fc9b1a06e1d6b40 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:29 +0200 Subject: [PATCH 08/18] objtool/x86: Fix SRSO mess Objtool --rethunk does two things: - it collects all (tail) call's of __x86_return_thunk and places them into .return_sites. These are typically compiler generated, but RET also emits this same. - it fudges the validation of the __x86_return_thunk symbol; because this symbol is inside another instruction, it can't actually find the instruction pointed to by the symbol offset and gets upset. Because these two things pertained to the same symbol, there was no pressing need to separate these two separate things. However, alas, along comes SRSO and more crazy things to deal with appeared. 
The SRSO patch itself added the following symbol names to identify as rethunk: 'srso_untrain_ret', 'srso_safe_ret' and '__ret' Where '__ret' is the old retbleed return thunk, 'srso_safe_ret' is a new similarly embedded return thunk, and 'srso_untrain_ret' is completely unrelated to anything the above does (and was only included because of that INT3 vs UD2 issue fixed previous). Clear things up by adding a second category for the embedded instruction thing. Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.704502245@infradead.org --- tools/objtool/arch/x86/decode.c | 11 +++++++---- tools/objtool/check.c | 24 ++++++++++++++++++++++-- tools/objtool/include/objtool/arch.h | 1 + tools/objtool/include/objtool/elf.h | 1 + 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 2d51fa8da9e8c..cba8a7be040e3 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -824,8 +824,11 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk") || - !strcmp(sym->name, "srso_untrain_ret") || - !strcmp(sym->name, "srso_safe_ret") || - !strcmp(sym->name, "__ret"); + return !strcmp(sym->name, "__x86_return_thunk"); +} + +bool arch_is_embedded_insn(struct symbol *sym) +{ + return !strcmp(sym->name, "__ret") || + !strcmp(sym->name, "srso_safe_ret"); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index e2ee10ce7703f..191656ee9fbc9 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -455,7 +455,7 @@ static int decode_instructions(struct objtool_file *file) return -1; } - if (func->return_thunk || func->alias != func) + if (func->embedded_insn || func->alias != func) continue; if (!find_insn(file, sec, func->offset)) { @@ -1288,16 +1288,33 @@ static int add_ignore_alternatives(struct objtool_file *file) return 0; } +/* + * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol + * will be added to the .retpoline_sites section. + */ __weak bool arch_is_retpoline(struct symbol *sym) { return false; } +/* + * Symbols that replace INSN_RETURN, every (tail) call to such a symbol + * will be added to the .return_sites section. + */ __weak bool arch_is_rethunk(struct symbol *sym) { return false; } +/* + * Symbols that are embedded inside other instructions, because sometimes crazy + * code exists. These are mostly ignored for validation purposes. + */ +__weak bool arch_is_embedded_insn(struct symbol *sym) +{ + return false; +} + static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) { struct reloc *reloc; @@ -1583,7 +1600,7 @@ static int add_jump_destinations(struct objtool_file *file) * middle of another instruction. Objtool only * knows about the outer instruction. 
*/ - if (sym && sym->return_thunk) { + if (sym && sym->embedded_insn) { add_return_call(file, insn, false); continue; } @@ -2502,6 +2519,9 @@ static int classify_symbols(struct objtool_file *file) if (arch_is_rethunk(func)) func->return_thunk = true; + if (arch_is_embedded_insn(func)) + func->embedded_insn = true; + if (arch_ftrace_match(func->name)) func->fentry = true; diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 2b6d2ce4f9a5b..0b303eba660e4 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base); bool arch_is_retpoline(struct symbol *sym); bool arch_is_rethunk(struct symbol *sym); +bool arch_is_embedded_insn(struct symbol *sym); int arch_rewrite_retpolines(struct objtool_file *file); diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index c532d70864dc5..9f71e988eca45 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -66,6 +66,7 @@ struct symbol { u8 fentry : 1; u8 profiling_func : 1; u8 warned : 1; + u8 embedded_insn : 1; struct list_head pv_target; struct reloc *relocs; }; From 095b8303f3835c68ac4a8b6d754ca1c3b6230711 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:30 +0200 Subject: [PATCH 09/18] x86/alternative: Make custom return thunk unconditional There is infrastructure to rewrite return thunks to point to any random thunk one desires, unwrap that from CALL_THUNKS, which up to now was the sole user of that. [ bp: Make the thunks visible on 32-bit and add ifdeffery for the 32-bit builds. ] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.775293785@infradead.org --- arch/x86/include/asm/nospec-branch.h | 9 +++++---- arch/x86/kernel/alternative.c | 4 ---- arch/x86/kernel/cpu/bugs.c | 2 ++ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index e50db53a4cb9f..b3625cceb2f02 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -341,17 +341,18 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[]; extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; +#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + extern void zen_untrain_ret(void); extern void srso_untrain_ret(void); extern void srso_untrain_ret_alias(void); extern void entry_ibpb(void); -#ifdef CONFIG_CALL_THUNKS extern void (*x86_return_thunk)(void); -#else -#define x86_return_thunk (&__x86_return_thunk) -#endif #ifdef CONFIG_CALL_DEPTH_TRACKING extern void __x86_return_skl(void); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 2dcf3a06af090..099d58d02a262 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -687,10 +687,6 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) #ifdef CONFIG_RETHUNK -#ifdef CONFIG_CALL_THUNKS -void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; -#endif - /* * Rewrite the compiler generated return thunk tail-calls. 
* diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 6c04aef4b63b2..3bc0d149eacd6 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -63,6 +63,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd); static DEFINE_MUTEX(spec_ctrl_mutex); +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { From d43490d0ab824023e11d0b57d0aeec17a6e0ca13 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:31 +0200 Subject: [PATCH 10/18] x86/cpu: Clean up SRSO return thunk mess Use the existing configurable return thunk. There is absolute no justification for having created this __x86_return_thunk alternative. To clarify, the whole thing looks like: Zen3/4 does: srso_alias_untrain_ret: nop2 lfence jmp srso_alias_return_thunk int3 srso_alias_safe_ret: // aliasses srso_alias_untrain_ret just so add $8, %rsp ret int3 srso_alias_return_thunk: call srso_alias_safe_ret ud2 While Zen1/2 does: srso_untrain_ret: movabs $foo, %rax lfence call srso_safe_ret (jmp srso_return_thunk ?) int3 srso_safe_ret: // embedded in movabs instruction add $8,%rsp ret int3 srso_return_thunk: call srso_safe_ret ud2 While retbleed does: zen_untrain_ret: test $0xcc, %bl lfence jmp zen_return_thunk int3 zen_return_thunk: // embedded in the test instruction ret int3 Where Zen1/2 flush the BTB entry using the instruction decoder trick (test,movabs) Zen3/4 use BTB aliasing. SRSO adds a return sequence (srso_safe_ret()) which forces the function return instruction to speculate into a trap (UD2). This RET will then mispredict and execution will continue at the return site read from the top of the stack. Pick one of three options at boot (evey function can only ever return once). [ bp: Fixup commit message uarch details and add them in a comment in the code too. Add a comment about the srso_select_mitigation() dependency on retbleed_select_mitigation(). Add moar ifdeffery for 32-bit builds. Add a dummy srso_untrain_ret_alias() definition for 32-bit alternatives needing the symbol. 
] Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.842775684@infradead.org --- arch/x86/include/asm/nospec-branch.h | 5 +++ arch/x86/kernel/cpu/bugs.c | 15 ++++++- arch/x86/kernel/vmlinux.lds.S | 2 +- arch/x86/lib/retpoline.S | 58 ++++++++++++++++++++-------- tools/objtool/arch/x86/decode.c | 2 +- 5 files changed, 62 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index b3625cceb2f02..5ed78ad9f2746 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -347,9 +347,14 @@ extern void __x86_return_thunk(void); static inline void __x86_return_thunk(void) {} #endif +extern void zen_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + extern void zen_untrain_ret(void); extern void srso_untrain_ret(void); extern void srso_untrain_ret_alias(void); + extern void entry_ibpb(void); extern void (*x86_return_thunk)(void); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 3bc0d149eacd6..56cf250c13f6f 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -167,6 +167,11 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). + */ srso_select_mitigation(); gds_select_mitigation(); } @@ -1037,6 +1042,9 @@ static void __init retbleed_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); + if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = zen_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -2453,10 +2461,13 @@ static void __init srso_select_mitigation(void) */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); - if (boot_cpu_data.x86 == 0x19) + if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - else + x86_return_thunk = srso_alias_return_thunk; + } else { setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 8e2a306e45b53..d3b02d67321b6 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -521,7 +521,7 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned"); . 
= ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index a478eb5bbf62a..7df8582fb64ee 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -151,22 +151,27 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) .section .text..__x86.rethunk_untrain SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC ANNOTATE_NOENDBR ASM_NOP2 lfence - jmp __x86_return_thunk + jmp srso_alias_return_thunk SYM_FUNC_END(srso_untrain_ret_alias) __EXPORT_THUNK(srso_untrain_ret_alias) .section .text..__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_untrain_ret_alias) #endif -/* Needs a definition for the __x86_return_thunk alternative below. */ SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) -#ifdef CONFIG_CPU_SRSO lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC -#endif ANNOTATE_UNRET_SAFE ret int3 @@ -174,9 +179,16 @@ SYM_FUNC_END(srso_safe_ret_alias) .section .text..__x86.return_thunk +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret_alias + ud2 +SYM_CODE_END(srso_alias_return_thunk) + /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. * 2) The instruction at zen_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. @@ -184,7 +196,7 @@ SYM_FUNC_END(srso_safe_ret_alias) * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__ret - zen_untrain_ret), 0xcc + .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* @@ -192,16 +204,16 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP zen_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * zen_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from zen_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -213,13 +225,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, zen_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) +SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__ret) +SYM_CODE_END(zen_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -230,7 +242,7 @@ SYM_CODE_END(__ret) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. 
*/ - jmp __ret + jmp zen_return_thunk int3 SYM_FUNC_END(zen_untrain_ret) __EXPORT_THUNK(zen_untrain_ret) @@ -251,11 +263,18 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR .byte 0x48, 0xb8 +/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. + */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 + /* end of movabs */ lfence call srso_safe_ret ud2 @@ -263,12 +282,19 @@ SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_CODE_START(__x86_return_thunk) +SYM_CODE_START(srso_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS + call srso_safe_ret ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret + int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index cba8a7be040e3..c55f3bb59b313 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -829,6 +829,6 @@ bool arch_is_rethunk(struct symbol *sym) bool arch_is_embedded_insn(struct symbol *sym) { - return !strcmp(sym->name, "__ret") || + return !strcmp(sym->name, "zen_return_thunk") || !strcmp(sym->name, "srso_safe_ret"); } From d025b7bac07a6e90b6b98b487f88854ad9247c39 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:32 +0200 Subject: [PATCH 11/18] x86/cpu: Rename original retbleed methods Rename the original retbleed return thunk and untrain_ret to retbleed_return_thunk() and retbleed_untrain_ret(). No functional changes. Suggested-by: Josh Poimboeuf Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.909378169@infradead.org --- arch/x86/include/asm/nospec-branch.h | 8 ++++---- arch/x86/kernel/cpu/bugs.c | 2 +- arch/x86/kernel/vmlinux.lds.S | 2 +- arch/x86/lib/retpoline.S | 30 ++++++++++++++-------------- tools/objtool/arch/x86/decode.c | 2 +- tools/objtool/check.c | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 5ed78ad9f2746..8a0d4c5f4cb76 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -272,7 +272,7 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_ZEN_UNTRAIN_RET "call retbleed_untrain_ret" #else #define CALL_ZEN_UNTRAIN_RET "" #endif @@ -282,7 +282,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. 
* * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -347,11 +347,11 @@ extern void __x86_return_thunk(void); static inline void __x86_return_thunk(void) {} #endif -extern void zen_return_thunk(void); +extern void retbleed_return_thunk(void); extern void srso_return_thunk(void); extern void srso_alias_return_thunk(void); -extern void zen_untrain_ret(void); +extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); extern void srso_untrain_ret_alias(void); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 56cf250c13f6f..bbbbda981bab7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1043,7 +1043,7 @@ static void __init retbleed_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_UNRET); if (IS_ENABLED(CONFIG_RETHUNK)) - x86_return_thunk = zen_return_thunk; + x86_return_thunk = retbleed_return_thunk; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d3b02d67321b6..7c0e2b4fc3447 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -521,7 +521,7 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned"); +. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 7df8582fb64ee..adabd07eab998 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -188,32 +188,32 @@ SYM_CODE_END(srso_alias_return_thunk) /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc -SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP zen_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * zen_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from zen_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -225,13 +225,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. 
If the prediction is - * evicted, zen_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(zen_return_thunk) +SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -242,13 +242,13 @@ SYM_CODE_END(zen_return_thunk) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp zen_return_thunk + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret) /* - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * * movabs $0xccccc30824648d48,%rax diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index c55f3bb59b313..c0f25d00181ec 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -829,6 +829,6 @@ bool arch_is_rethunk(struct symbol *sym) bool arch_is_embedded_insn(struct symbol *sym) { - return !strcmp(sym->name, "zen_return_thunk") || + return !strcmp(sym->name, "retbleed_return_thunk") || !strcmp(sym->name, "srso_safe_ret"); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 191656ee9fbc9..7a9aaf4008737 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1593,7 +1593,7 @@ static int add_jump_destinations(struct objtool_file *file) struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); /* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). * It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the From 42be649dd1f2eee6b1fb185f1a231b9494cf095f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:33 +0200 Subject: [PATCH 12/18] x86/cpu: Rename srso_(.*)_alias to srso_alias_\1 For a more consistent namespace. [ bp: Fixup names in the doc too. ] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.976236447@infradead.org --- Documentation/admin-guide/hw-vuln/srso.rst | 4 ++-- arch/x86/include/asm/nospec-branch.h | 6 ++--- arch/x86/kernel/vmlinux.lds.S | 8 +++---- arch/x86/lib/retpoline.S | 26 +++++++++++----------- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index af59a93956621..b6cfb51cb0b46 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -141,8 +141,8 @@ sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the -untraining function srso_untrain_ret_alias() and the safe return -function srso_safe_ret_alias() which results in evicting a potentially +untraining function srso_alias_untrain_ret() and the safe return +function srso_alias_safe_ret() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns. 
In older Zen1 and Zen2, this is accomplished using a reinterpretation diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 8a0d4c5f4cb76..f7c337515d1e8 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -300,7 +300,7 @@ #ifdef CONFIG_CPU_SRSO ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS + "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS #endif .endm @@ -316,7 +316,7 @@ #ifdef CONFIG_CPU_SRSO ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS + "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS #endif .endm @@ -353,7 +353,7 @@ extern void srso_alias_return_thunk(void); extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); -extern void srso_untrain_ret_alias(void); +extern void srso_alias_untrain_ret(void); extern void entry_ibpb(void); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 7c0e2b4fc3447..83d41c2601d7b 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -147,10 +147,10 @@ SECTIONS #ifdef CONFIG_CPU_SRSO /* - * See the comment above srso_untrain_ret_alias()'s + * See the comment above srso_alias_untrain_ret()'s * definition. */ - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END @@ -536,8 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store); * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - - (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index adabd07eab998..d37e5ab0d5ee6 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -133,56 +133,56 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) #ifdef CONFIG_RETHUNK /* - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * - * - srso_untrain_ret_alias() is 2M aligned - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the - * srso_untrain_ret_alias() function are cleared). + * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * - * As a result, srso_safe_ret_alias() becomes a safe return. + * As a result, srso_alias_safe_ret() becomes a safe return. 
*/ #ifdef CONFIG_CPU_SRSO .section .text..__x86.rethunk_untrain -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) UNWIND_HINT_FUNC ANNOTATE_NOENDBR ASM_NOP2 lfence jmp srso_alias_return_thunk -SYM_FUNC_END(srso_untrain_ret_alias) -__EXPORT_THUNK(srso_untrain_ret_alias) +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) .section .text..__x86.rethunk_safe #else /* dummy definition for alternatives */ -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_untrain_ret_alias) +SYM_FUNC_END(srso_alias_untrain_ret) #endif -SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_safe_ret_alias) +SYM_FUNC_END(srso_alias_safe_ret) .section .text..__x86.return_thunk SYM_CODE_START(srso_alias_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR - call srso_safe_ret_alias + call srso_alias_safe_ret ud2 SYM_CODE_END(srso_alias_return_thunk) From e7c25c441e9e0fa75b4c83e0b26306b702cfe90d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:34 +0200 Subject: [PATCH 13/18] x86/cpu: Cleanup the untrain mess Since there can only be one active return_thunk, there only needs be one (matching) untrain_ret. It fundamentally doesn't make sense to allow multiple untrain_ret at the same time. Fold all the 3 different untrain methods into a single (temporary) helper stub. Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org --- arch/x86/include/asm/nospec-branch.h | 19 +++++-------------- arch/x86/kernel/cpu/bugs.c | 1 + arch/x86/lib/retpoline.S | 7 +++++++ 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index f7c337515d1e8..5285c8e93dff3 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -272,9 +272,9 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call retbleed_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif /* @@ -293,15 +293,10 @@ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS -#endif .endm .macro UNTRAIN_RET_FROM_CALL @@ -309,15 +304,10 @@ defined(CONFIG_CALL_DEPTH_TRACKING) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS -#endif .endm @@ -355,6 +345,7 @@ extern void retbleed_untrain_ret(void); extern void 
srso_untrain_ret(void); extern void srso_alias_untrain_ret(void); +extern void entry_untrain_ret(void); extern void entry_ibpb(void); extern void (*x86_return_thunk)(void); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index bbbbda981bab7..6f3e195272867 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2460,6 +2460,7 @@ static void __init srso_select_mitigation(void) * like ftrace, static_call, etc. */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index d37e5ab0d5ee6..5e85da150e961 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -289,6 +289,13 @@ SYM_CODE_START(srso_return_thunk) ud2 SYM_CODE_END(srso_return_thunk) +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + SYM_CODE_START(__x86_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR From 864bcaa38ee44ec6c0e43f79c2d2997b977e26b2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:35 +0200 Subject: [PATCH 14/18] x86/cpu/kvm: Provide UNTRAIN_RET_VM Similar to how it doesn't make sense to have UNTRAIN_RET have two untrain calls, it also doesn't make sense for VMEXIT to have an extra IBPB call. This cures VMEXIT doing potentially unret+IBPB or double IBPB. Also, the (SEV) VMEXIT case seems to have been overlooked. Redefine the meaning of the synthetic IBPB flags to: - ENTRY_IBPB -- issue IBPB on entry (was: entry + VMEXIT) - IBPB_ON_VMEXIT -- issue IBPB on VMEXIT And have 'retbleed=ibpb' set *BOTH* feature flags to ensure it retains the previous behaviour and issues IBPB on entry+VMEXIT. The new 'srso=ibpb_vmexit' option only sets IBPB_ON_VMEXIT. Create UNTRAIN_RET_VM specifically for the VMEXIT case, and have that check IBPB_ON_VMEXIT. All this avoids having the VMEXIT case having to check both ENTRY_IBPB and IBPB_ON_VMEXIT and simplifies the alternatives. 
Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121149.109557833@infradead.org --- arch/x86/include/asm/nospec-branch.h | 11 +++++++++++ arch/x86/kernel/cpu/bugs.c | 1 + arch/x86/kvm/svm/vmenter.S | 7 ++----- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 5285c8e93dff3..c55cc243592e9 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -299,6 +299,17 @@ #endif .endm +.macro UNTRAIN_RET_VM +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) + VALIDATE_UNRET_END + ALTERNATIVE_3 "", \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ + "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \ + __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH +#endif +.endm + .macro UNTRAIN_RET_FROM_CALL #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ defined(CONFIG_CALL_DEPTH_TRACKING) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 6f3e195272867..9026e3fe9f6c7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1054,6 +1054,7 @@ static void __init retbleed_select_mitigation(void) case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); mitigate_smt = true; break; diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 265452fc9ebe9..ef2ebabb059c8 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -222,10 +222,7 @@ SYM_FUNC_START(__svm_vcpu_run) * because interrupt handlers won't sanitize 'ret' if the return is * from the kernel. */ - UNTRAIN_RET - - /* SRSO */ - ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + UNTRAIN_RET_VM /* * Clear all general purpose registers except RSP and RAX to prevent @@ -362,7 +359,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) * because interrupt handlers won't sanitize RET if the return is * from the kernel. */ - UNTRAIN_RET + UNTRAIN_RET_VM /* "Pop" @spec_ctrl_intercepted. */ pop %_ASM_BX From 9dbd23e42ff0b10c9b02c9e649c76e5228241a8e Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Mon, 14 Aug 2023 21:29:50 +0200 Subject: [PATCH 15/18] x86/srso: Explain the untraining sequences a bit more The goal is to eventually have a proper documentation about all this. Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814164447.GFZNpZ/64H4lENIe94@fat_crate.local --- arch/x86/lib/retpoline.S | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 5e85da150e961..cd86aeb5fdd3e 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -186,6 +186,25 @@ SYM_CODE_START(srso_alias_return_thunk) ud2 SYM_CODE_END(srso_alias_return_thunk) +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. + * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. 
+ *
+ * Regarding alignment - the instructions which need to be untrained,
+ * must all start at a cacheline boundary for Zen1/2 generations. That
+ * is, instruction sequences starting at srso_safe_ret() and
+ * the respective instruction sequences at retbleed_return_thunk()
+ * must start at a cacheline boundary.
+ */
+
 /*
  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
  * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for

From dbf46008775516f7f25c95b7760041c286299783 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 16 Aug 2023 13:59:21 +0200
Subject: [PATCH 16/18] objtool/x86: Fixup frame-pointer vs rethunk

For stack-validation of a frame-pointer build, objtool validates that
every CALL instruction is preceded by a frame-setup.

The new SRSO return thunks violate this with their RSB stuffing
trickery.

Extend the __fentry__ exception to also cover the embedded_insn case
used for this.

This cures:

  vmlinux.o: warning: objtool: srso_untrain_ret+0xd: call without frame pointer save/setup

Fixes: 4ae68b26c3ab ("objtool/x86: Fix SRSO mess")
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Borislav Petkov (AMD)
Acked-by: Josh Poimboeuf
Link: https://lore.kernel.org/r/20230816115921.GH980931@hirez.programming.kicks-ass.net
---
 tools/objtool/check.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 7a9aaf4008737..1384090530dbe 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2650,12 +2650,17 @@ static int decode_sections(struct objtool_file *file)
	return 0;
 }
 
-static bool is_fentry_call(struct instruction *insn)
+static bool is_special_call(struct instruction *insn)
 {
-	if (insn->type == INSN_CALL &&
-	    insn_call_dest(insn) &&
-	    insn_call_dest(insn)->fentry)
-		return true;
+	if (insn->type == INSN_CALL) {
+		struct symbol *dest = insn_call_dest(insn);
+
+		if (!dest)
+			return false;
+
+		if (dest->fentry || dest->embedded_insn)
+			return true;
+	}
 
	return false;
 }
@@ -3656,7 +3661,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
			if (ret)
				return ret;
 
-		if (opts.stackval && func && !is_fentry_call(insn) &&
+		if (opts.stackval && func && !is_special_call(insn) &&
		    !has_valid_stack_frame(&state)) {
			WARN_INSN(insn, "call without frame pointer save/setup");
			return 1;

From 54097309620ef0dc2d7083783dc521c6a5fef957 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 16 Aug 2023 12:44:19 +0200
Subject: [PATCH 17/18] x86/static_call: Fix __static_call_fixup()

Christian reported spurious module load crashes after some of Song's
module memory layout patches.

Turns out that if the very last instruction on the very last page of the
module is a 'JMP __x86_return_thunk' then __static_call_fixup() will
trip a fault and die.

And while the module rework made this slightly more likely to happen,
it's always been possible.
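The hazard is purely about reading the bytes that follow the trampoline
across a page boundary. A stand-alone sketch of the check looks like
this (the helper name and the next_page_is_text parameter are made up
for illustration; the kernel combines the page check with
kernel_text_address() as in the hunk below):

  #include <stdbool.h>
  #include <stdint.h>

  #define PAGE_SHIFT 12

  /*
   * A 'JMP __x86_return_thunk' at the very end of the last module page
   * means the bytes at tramp+5..tramp+7 may live on an unmapped page.
   * Only look at them when they stay on the same page, or when the next
   * page is known to be kernel text.
   */
  static bool may_read_following_bytes(uintptr_t tramp, bool next_page_is_text)
  {
          /* Same page: the 3 bytes after the 5-byte JMP are mapped. */
          if ((tramp >> PAGE_SHIFT) == ((tramp + 7) >> PAGE_SHIFT))
                  return true;

          /* Crossing into the next page: only safe if that page is text too. */
          return next_page_is_text;
  }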
Fixes: ee88d363d156 ("x86,static_call: Use alternative RET encoding")
Reported-by: Christian Bricart
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Josh Poimboeuf
Link: https://lkml.kernel.org/r/20230816104419.GA982867@hirez.programming.kicks-ass.net
---
 arch/x86/kernel/static_call.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index b70670a985978..77a9316da4357 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -186,6 +186,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
  */
 bool __static_call_fixup(void *tramp, u8 op, void *dest)
 {
+	unsigned long addr = (unsigned long)tramp;
+	/*
+	 * Not all .return_sites are a static_call trampoline (most are not).
+	 * Check if the 3 bytes after the return are still kernel text, if not,
+	 * then this definitely is not a trampoline and we need not worry
+	 * further.
+	 *
+	 * This avoids the memcmp() below tripping over pagefaults etc..
+	 */
+	if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
+	    !kernel_text_address(addr + 7))
+		return false;
+
	if (memcmp(tramp+5, tramp_ud, 3)) {
		/* Not a trampoline site, not our problem. */
		return false;

From 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 Mon Sep 17 00:00:00 2001
From: "Borislav Petkov (AMD)"
Date: Tue, 15 Aug 2023 11:53:13 +0200
Subject: [PATCH 18/18] x86/srso: Correct the mitigation status when SMT is disabled

Specify how SRSO is mitigated when SMT is disabled. Also, correct the
SMT check for that.

Fixes: e9fbc47b818b ("x86/srso: Disable the mitigation on unaffected configurations")
Suggested-by: Josh Poimboeuf
Signed-off-by: Borislav Petkov (AMD)
Acked-by: Josh Poimboeuf
Link: https://lore.kernel.org/r/20230814200813.p5czl47zssuej7nv@treble
---
 arch/x86/kernel/cpu/bugs.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 9026e3fe9f6c7..f081d26616ac1 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2428,8 +2428,7 @@ static void __init srso_select_mitigation(void)
	 * Zen1/2 with SMT off aren't vulnerable after the right
	 * IBPB microcode has been applied.
	 */
-	if ((boot_cpu_data.x86 < 0x19) &&
-	    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) {
+	if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
		setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
		return;
	}
@@ -2714,7 +2713,7 @@ static ssize_t retbleed_show_state(char *buf)
 static ssize_t srso_show_state(char *buf)
 {
	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
-		return sysfs_emit(buf, "Not affected\n");
+		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
 
	return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation],