Skip to content

Commit

Permalink
Merge tag 'x86-urgent-2023-09-22' of git://git.kernel.org/pub/scm/lin…
Browse files Browse the repository at this point in the history
…ux/kernel/git/tip/tip

Pull misc x86 fixes from Ingo Molnar:

 - Fix a kexec bug

 - Fix a UML build bug

 - Fix a handful of SRSO related bugs

 - Fix a shadow stacks handling bug & robustify related code

* tag 'x86-urgent-2023-09-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/shstk: Add warning for shadow stack double unmap
  x86/shstk: Remove useless clone error handling
  x86/shstk: Handle vfork clone failure correctly
  x86/srso: Fix SBPB enablement for spec_rstack_overflow=off
  x86/srso: Don't probe microcode in a guest
  x86/srso: Set CPUID feature bits independently of bug or mitigation status
  x86/srso: Fix srso_show_state() side effect
  x86/asm: Fix build of UML with KASAN
  x86/mm, kexec, ima: Use memblock_free_late() from ima_free_kexec_buffer()
  • Loading branch information
Linus Torvalds committed Sep 22, 2023
2 parents 5b47b57 + 509ff51 commit e583bff
Show file tree
Hide file tree
Showing 11 changed files with 56 additions and 55 deletions.
7 changes: 7 additions & 0 deletions arch/x86/include/asm/linkage.h
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,13 @@
CFI_POST_PADDING \
SYM_FUNC_END(__cfi_##name)

/*
 * UML needs to be able to override memcpy() and friends for KASAN:
 * its instrumented replacements must take precedence at link time,
 * so emit the mem*() entry points as weak aliases on UML and as
 * regular (strong) aliases everywhere else.
 */
#ifdef CONFIG_UML
# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS_WEAK
#else
# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS
#endif

/* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
#define SYM_TYPED_FUNC_START(name) \
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN) \
Expand Down
3 changes: 1 addition & 2 deletions arch/x86/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -186,8 +186,7 @@ do { \
#else
#define deactivate_mm(tsk, mm) \
do { \
if (!tsk->vfork_done) \
shstk_free(tsk); \
shstk_free(tsk); \
load_gs_index(0); \
loadsegment(fs, 0); \
} while (0)
Expand Down
2 changes: 0 additions & 2 deletions arch/x86/include/asm/processor.h
Original file line number Diff line number Diff line change
Expand Up @@ -683,13 +683,11 @@ extern u16 get_llc_id(unsigned int cpu);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline u32 amd_get_highest_perf(void) { return 0; }
static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
static inline void amd_clear_divider(void) { }
static inline void amd_check_microcode(void) { }
#endif
Expand Down
28 changes: 9 additions & 19 deletions arch/x86/kernel/cpu/amd.c
Original file line number Diff line number Diff line change
Expand Up @@ -766,6 +766,15 @@ static void early_init_amd(struct cpuinfo_x86 *c)

if (cpu_has(c, X86_FEATURE_TOPOEXT))
smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;

if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
setup_force_cpu_cap(X86_FEATURE_SBPB);
}
}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
Expand Down Expand Up @@ -1301,25 +1310,6 @@ void amd_check_microcode(void)
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

/*
 * Report whether the running microcode extends IBPB to also flush
 * branch type predictions (relevant for the SRSO mitigation).
 */
bool cpu_has_ibpb_brtype_microcode(void)
{
	/* Zen1/2 (family 0x17): IBPB flushes branch type predictions too. */
	if (boot_cpu_data.x86 == 0x17)
		return boot_cpu_has(X86_FEATURE_AMD_IBPB);

	/*
	 * Zen3/4 (family 0x19): probe for SBPB support by poking the
	 * MSR bit; a successful write means the feature is present.
	 */
	if (boot_cpu_data.x86 == 0x19 &&
	    !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
		setup_force_cpu_cap(X86_FEATURE_SBPB);
		return true;
	}

	return false;
}

/*
* Issue a DIV 0/1 insn to clear any division data from previous DIV
* operations.
Expand Down
17 changes: 3 additions & 14 deletions arch/x86/kernel/cpu/bugs.c
Original file line number Diff line number Diff line change
Expand Up @@ -2404,26 +2404,15 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);

static void __init srso_select_mitigation(void)
{
bool has_microcode;
bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
goto pred_cmd;

/*
* The first check is for the kernel running as a guest in order
* for guests to verify whether IBPB is a viable mitigation.
*/
has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
if (!has_microcode) {
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
} else {
/*
* Enable the synthetic (even if in a real CPUID leaf)
* flags for guests.
*/
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);

/*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
Expand All @@ -2444,7 +2433,7 @@ static void __init srso_select_mitigation(void)

switch (srso_cmd) {
case SRSO_CMD_OFF:
return;
goto pred_cmd;

case SRSO_CMD_MICROCODE:
if (has_microcode) {
Expand Down Expand Up @@ -2717,7 +2706,7 @@ static ssize_t srso_show_state(char *buf)

return sysfs_emit(buf, "%s%s\n",
srso_strings[srso_mitigation],
(cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
}

static ssize_t gds_show_state(char *buf)
Expand Down
7 changes: 0 additions & 7 deletions arch/x86/kernel/process.c
Original file line number Diff line number Diff line change
Expand Up @@ -257,13 +257,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
io_bitmap_share(p);

/*
* If copy_thread() is failing, don't leak the shadow stack possibly
* allocated in shstk_alloc_thread_stack() above.
*/
if (ret)
shstk_free(p);

return ret;
}

Expand Down
8 changes: 2 additions & 6 deletions arch/x86/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -358,15 +358,11 @@ static void __init add_early_ima_buffer(u64 phys_addr)
#if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
int __init ima_free_kexec_buffer(void)
{
int rc;

if (!ima_kexec_buffer_size)
return -ENOENT;

rc = memblock_phys_free(ima_kexec_buffer_phys,
ima_kexec_buffer_size);
if (rc)
return rc;
memblock_free_late(ima_kexec_buffer_phys,
ima_kexec_buffer_size);

ima_kexec_buffer_phys = 0;
ima_kexec_buffer_size = 0;
Expand Down
33 changes: 31 additions & 2 deletions arch/x86/kernel/shstk.c
Original file line number Diff line number Diff line change
Expand Up @@ -205,10 +205,21 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long cl
return 0;

/*
* For CLONE_VM, except vfork, the child needs a separate shadow
* For CLONE_VFORK the child will share the parent's shadow stack.
* Make sure to clear the internal tracking of the thread shadow
* stack so the freeing logic run for the child knows to leave it alone.
*/
if (clone_flags & CLONE_VFORK) {
shstk->base = 0;
shstk->size = 0;
return 0;
}

/*
* For !CLONE_VM the child will use a copy of the parent's shadow
* stack.
*/
if ((clone_flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM)
if (!(clone_flags & CLONE_VM))
return 0;

size = adjust_shstk_size(stack_size);
Expand Down Expand Up @@ -408,7 +419,25 @@ void shstk_free(struct task_struct *tsk)
if (!tsk->mm || tsk->mm != current->mm)
return;

/*
* If shstk->base is NULL, then this task is not managing its
* own shadow stack (CLONE_VFORK). So skip freeing it.
*/
if (!shstk->base)
return;

/*
* shstk->base is NULL for CLONE_VFORK child tasks, and so is
* normal. But size == 0 with a non-NULL shstk->base is not normal and
* indicates an attempt to free the thread shadow stack twice.
* Warn about it.
*/
if (WARN_ON(!shstk->size))
return;

unmap_shadow_stack(shstk->base, shstk->size);

shstk->size = 0;
}

static int wrss_control(bool enable)
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/lib/memcpy_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ SYM_TYPED_FUNC_START(__memcpy)
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)

SYM_FUNC_ALIAS(memcpy, __memcpy)
SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)

SYM_FUNC_START_LOCAL(memcpy_orig)
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/lib/memmove_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -212,5 +212,5 @@ SYM_FUNC_START(__memmove)
SYM_FUNC_END(__memmove)
EXPORT_SYMBOL(__memmove)

SYM_FUNC_ALIAS(memmove, __memmove)
SYM_FUNC_ALIAS_MEMFUNC(memmove, __memmove)
EXPORT_SYMBOL(memmove)
2 changes: 1 addition & 1 deletion arch/x86/lib/memset_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)

SYM_FUNC_ALIAS(memset, __memset)
SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
EXPORT_SYMBOL(memset)

SYM_FUNC_START_LOCAL(memset_orig)
Expand Down

0 comments on commit e583bff

Please sign in to comment.