x86/retbleed: Add fine grained Kconfig knobs
Do fine-grained Kconfig for all the various retbleed parts.

NOTE: if your compiler doesn't support return thunks, this will
silently 'upgrade' your mitigation to IBPB; you might not like this.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Peter Zijlstra authored and Borislav Petkov committed Jun 29, 2022
1 parent 26aae8c commit f43b987
Showing 19 changed files with 178 additions and 69 deletions.
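
The note in the commit message matters at build time: the new RETHUNK option below depends on CC_HAS_RETURN_THUNK, so with a toolchain that lacks the flag the auto selection on affected AMD parts ends up at IBPB, as the note warns. A quick way to probe a compiler before configuring (a sketch; it tests the same flag the CC_HAS_RETURN_THUNK symbol probes, and assumes gcc is the target compiler):

    # Exits 0 and prints the message only if the flag is accepted.
    echo 'int main(void){return 0;}' | \
        gcc -mfunction-return=thunk-extern -x c -c - -o /dev/null \
        && echo "return thunks supported"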
111 changes: 85 additions & 26 deletions arch/x86/Kconfig
@@ -462,32 +462,6 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH

config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
select OBJTOOL if HAVE_OBJTOOL
default y
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.

config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)

config CC_HAS_RETURN_THUNK
def_bool $(cc-option,-mfunction-return=thunk-extern)

config SLS
bool "Mitigate Straight-Line-Speculation"
depends on CC_HAS_SLS && X86_64
select OBJTOOL if HAVE_OBJTOOL
default n
help
Compile the kernel with straight-line-speculation options to guard
against straight line speculation. The kernel image might be slightly
larger.

config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -2456,6 +2430,91 @@ source "kernel/livepatch/Kconfig"

endmenu

config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)

config CC_HAS_RETURN_THUNK
def_bool $(cc-option,-mfunction-return=thunk-extern)

menuconfig SPECULATION_MITIGATIONS
bool "Mitigations for speculative execution vulnerabilities"
default y
help
Say Y here to enable options which enable mitigations for
speculative execution hardware vulnerabilities.

If you say N, all mitigations will be disabled. You really
should know what you are doing to say so.

if SPECULATION_MITIGATIONS

config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
default y
depends on (X86_64 || X86_PAE)
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
into userspace.

See Documentation/x86/pti.rst for more details.

config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
select OBJTOOL if HAVE_OBJTOOL
default y
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.

config RETHUNK
bool "Enable return-thunks"
depends on RETPOLINE && CC_HAS_RETURN_THUNK
select OBJTOOL if HAVE_OBJTOOL
default y
help
Compile the kernel with the return-thunks compiler option to guard
against kernel-to-user data leaks by avoiding return speculation.
Requires a compiler with -mfunction-return=thunk-extern
support for full protection. The kernel may run slower.

config CPU_UNRET_ENTRY
bool "Enable UNRET on kernel entry"
depends on CPU_SUP_AMD && RETHUNK
default y
help
Compile the kernel with support for the retbleed=unret mitigation.

config CPU_IBPB_ENTRY
bool "Enable IBPB on kernel entry"
depends on CPU_SUP_AMD
default y
help
Compile the kernel with support for the retbleed=ibpb mitigation.

config CPU_IBRS_ENTRY
bool "Enable IBRS on kernel entry"
depends on CPU_SUP_INTEL
default y
help
Compile the kernel with support for the spectre_v2=ibrs mitigation.
This mitigates both spectre_v2 and retbleed at great cost to
performance.

config SLS
bool "Mitigate Straight-Line-Speculation"
depends on CC_HAS_SLS && X86_64
select OBJTOOL if HAVE_OBJTOOL
default n
help
Compile the kernel with straight-line-speculation options to guard
against straight line speculation. The kernel image might be slightly
larger.

endif

config ARCH_HAS_ADD_PAGES
def_bool y
depends on ARCH_ENABLE_MEMORY_HOTPLUG
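
For orientation, the new menu with everything above enabled would appear in .config roughly like this (a sketch; actual defaults depend on the CC_HAS_RETURN_THUNK and CC_HAS_SLS compiler probes and on the CPU_SUP_* vendor options):

    CONFIG_SPECULATION_MITIGATIONS=y
    CONFIG_PAGE_TABLE_ISOLATION=y
    CONFIG_RETPOLINE=y
    CONFIG_RETHUNK=y
    CONFIG_CPU_UNRET_ENTRY=y
    CONFIG_CPU_IBPB_ENTRY=y
    CONFIG_CPU_IBRS_ENTRY=y
    # CONFIG_SLS is not set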
8 changes: 6 additions & 2 deletions arch/x86/Makefile
@@ -15,14 +15,18 @@ endif
ifdef CONFIG_CC_IS_GCC
RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern)
RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
endif
ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern)
endif

ifdef CONFIG_RETHUNK
RETHUNK_CFLAGS := -mfunction-return=thunk-extern
RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
endif

export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS

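
With CONFIG_RETHUNK=y the compiler emits function returns as tail calls to the return thunk. A simple sanity check against a finished build (a sketch; assumes the commands run from the top of a built tree and that kernel/fork.o exists there):

    # The thunk itself must be present in the final image ...
    nm vmlinux | grep __x86_return_thunk
    # ... and ordinary objects should carry relocations to it.
    objdump -dr kernel/fork.o | grep __x86_return_thunk | head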
4 changes: 4 additions & 0 deletions arch/x86/entry/calling.h
@@ -297,6 +297,7 @@ For 32-bit we have the following conventions - kernel is built with
* Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
*/
.macro IBRS_ENTER save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx

@@ -317,13 +318,15 @@ For 32-bit we have the following conventions - kernel is built with
shr $32, %rdx
wrmsr
.Lend_\@:
#endif
.endm

/*
* Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
* regs. Must be called after the last RET.
*/
.macro IBRS_EXIT save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx

@@ -338,6 +341,7 @@ For 32-bit we have the following conventions - kernel is built with
shr $32, %rdx
wrmsr
.Lend_\@:
#endif
.endm

/*
18 changes: 14 additions & 4 deletions arch/x86/include/asm/disabled-features.h
@@ -54,9 +54,19 @@
# define DISABLE_RETPOLINE 0
#else
# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
(1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)) | \
(1 << (X86_FEATURE_RETHUNK & 31)) | \
(1 << (X86_FEATURE_UNRET & 31)))
(1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
#endif

#ifdef CONFIG_RETHUNK
# define DISABLE_RETHUNK 0
#else
# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
# define DISABLE_UNRET 0
#else
# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
#endif

#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -91,7 +101,7 @@
#define DISABLED_MASK8 (DISABLE_TDX_GUEST)
#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 (DISABLE_RETPOLINE)
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
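
The point of splitting out DISABLE_RETHUNK and DISABLE_UNRET is that a disabled knob turns the corresponding feature test into a compile-time constant, so the guarded code can be discarded by the compiler (in the kernel this runs through DISABLED_MASK11 and the cpu_feature_enabled() machinery). A standalone illustration of the idea, not kernel code; the bit number and macro names are invented for the demo:

    #include <stdio.h>

    #define X86_FEATURE_RETHUNK  10                  /* made-up bit number */

    #ifdef CONFIG_RETHUNK
    # define DISABLE_RETHUNK 0u
    #else
    # define DISABLE_RETHUNK (1u << X86_FEATURE_RETHUNK)
    #endif

    #define DISABLED_MASK (DISABLE_RETHUNK)

    /* With the knob off this folds to constant 0, so guarded code vanishes. */
    #define feature_enabled(bit) (!(DISABLED_MASK & (1u << (bit))))

    int main(void)
    {
        if (feature_enabled(X86_FEATURE_RETHUNK))
            printf("return thunks compiled in\n");
        else
            printf("return thunks compiled out\n");
        return 0;
    }

Build with -DCONFIG_RETHUNK to see the other branch; either way the untaken branch is dead code the optimizer can drop.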
4 changes: 2 additions & 2 deletions arch/x86/include/asm/linkage.h
@@ -19,7 +19,7 @@
#define __ALIGN_STR __stringify(__ALIGN)
#endif

#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define RET jmp __x86_return_thunk
#else /* CONFIG_RETPOLINE */
#ifdef CONFIG_SLS
@@ -31,7 +31,7 @@

#else /* __ASSEMBLY__ */

#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define ASM_RET "jmp __x86_return_thunk\n\t"
#else /* CONFIG_RETPOLINE */
#ifdef CONFIG_SLS
10 changes: 8 additions & 2 deletions arch/x86/include/asm/nospec-branch.h
@@ -127,6 +127,12 @@
.Lskip_rsb_\@:
.endm

#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET ""
#endif

/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
* return thunk isn't mapped into the userspace tables (then again, AMD
@@ -139,10 +145,10 @@
* where we have a stack but before any RET instruction.
*/
.macro UNTRAIN_RET
#ifdef CONFIG_RETPOLINE
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
ANNOTATE_UNRET_END
ALTERNATIVE_2 "", \
"call zen_untrain_ret", X86_FEATURE_UNRET, \
CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
2 changes: 1 addition & 1 deletion arch/x86/include/asm/static_call.h
@@ -46,7 +46,7 @@
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_RETHUNK
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk")
#else
5 changes: 5 additions & 0 deletions arch/x86/kernel/alternative.c
@@ -508,6 +508,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
}
}

#ifdef CONFIG_RETHUNK
/*
* Rewrite the compiler generated return thunk tail-calls.
*
@@ -569,6 +570,10 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
}
}
}
#else
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
#endif /* CONFIG_RETHUNK */

#else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */

void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
2 changes: 2 additions & 0 deletions arch/x86/kernel/cpu/amd.c
@@ -864,6 +864,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
u64 value;

/*
@@ -880,6 +881,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
}
}
#endif
}

static void init_amd_zn(struct cpuinfo_x86 *c)
42 changes: 27 additions & 15 deletions arch/x86/kernel/cpu/bugs.c
@@ -842,7 +842,6 @@ static int __init retbleed_parse_cmdline(char *str)
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
@@ -857,18 +856,33 @@ static void __init retbleed_select_mitigation(void)
return;

case RETBLEED_CMD_UNRET:
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
} else {
pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
goto do_cmd_auto;
}
break;

case RETBLEED_CMD_IBPB:
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
goto do_cmd_auto;
}
break;

do_cmd_auto:
case RETBLEED_CMD_AUTO:
default:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY))
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
}

/*
* The Intel mitigation (IBRS or eIBRS) was already selected in
@@ -881,14 +895,6 @@ static void __init retbleed_select_mitigation(void)

switch (retbleed_mitigation) {
case RETBLEED_MITIGATION_UNRET:

if (!IS_ENABLED(CONFIG_RETPOLINE) ||
!IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
pr_err(RETBLEED_COMPILER_MSG);
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
goto retbleed_force_ibpb;
}

setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);

@@ -900,7 +906,6 @@ static void __init retbleed_select_mitigation(void)
break;

case RETBLEED_MITIGATION_IBPB:
retbleed_force_ibpb:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
mitigate_smt = true;
break;
@@ -1271,6 +1276,12 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}

if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
pr_err("%s selected but not compiled in. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}

if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
mitigation_options[i].option);
@@ -1328,7 +1339,8 @@ static void __init spectre_v2_select_mitigation(void)
break;
}

if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
boot_cpu_has_bug(X86_BUG_RETBLEED) &&
retbleed_cmd != RETBLEED_CMD_OFF &&
boot_cpu_has(X86_FEATURE_IBRS) &&
boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
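
For reference, the command-line spellings these checks act on (a usage sketch; each option is honoured only when its matching Kconfig knob is built in, otherwise the code above falls back to the auto selection):

    retbleed=unret      # needs CONFIG_CPU_UNRET_ENTRY
    retbleed=ibpb       # needs CONFIG_CPU_IBPB_ENTRY
    spectre_v2=ibrs     # needs CONFIG_CPU_IBRS_ENTRY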
2 changes: 1 addition & 1 deletion arch/x86/kernel/static_call.c
@@ -126,7 +126,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_RETHUNK
/*
* This is called by apply_returns() to fix up static call trampolines,
* specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as
(Diffs for the remaining changed files are not shown here.)
