mariux64/linux @ b63f20a - arch/x86/include/asm/nospec-branch.h (422 lines, 11.2 KB)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * This should be used immediately before a retpoline alternative. It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
#define ANNOTATE_NOSPEC_ALTERNATIVE \
	ANNOTATE_IGNORE_ALTERNATIVE

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	call	772f;				\
773:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	call	774f;				\
775:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	dec	reg;				\
	jnz	771b;				\
	add	$(BITS_PER_LONG/8) * nr, sp;
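/*
 * Worked example of the fill arithmetic above (illustrative note, derived
 * from the macro itself): each loop iteration performs two calls, so with
 * nr = RSB_CLEAR_LOOPS = 32 the loop runs 32/2 = 16 times and pushes 32
 * return addresses, enough to overwrite every entry of a 32-entry RSB.
 * The trailing 'add' then unwinds the stack by (BITS_PER_LONG/8) * nr
 * bytes, i.e. 8 * 32 = 256 bytes on x86_64, discarding the bogus return
 * addresses without ever executing a 'ret' through them.
 */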
#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP	\reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
#endif
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
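/*
 * Illustrative call-site sketch (not part of this header; the function
 * name and pointer are hypothetical): this is how C code can make a
 * speculation-safe indirect call through CALL_NOSPEC/THUNK_TARGET. A
 * real caller must additionally list every register the callee may
 * clobber in the asm clobber list.
 *
 *	static void example_dispatch(void (*fn)(void))
 *	{
 *		asm volatile(CALL_NOSPEC
 *			     : : THUNK_TARGET(fn)
 *			     : "memory");
 *	}
 *
 * Without CONFIG_RETPOLINE this compiles to a plain 'call *fn'; with
 * retpolines enabled it becomes a call through the matching
 * __x86_indirect_thunk_<reg>, or an lfence-guarded indirect call on
 * CPUs with X86_FEATURE_RETPOLINE_AMD.
 */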
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS_ENHANCED,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE("jmp 910f",
				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
				  X86_FEATURE_RETPOLINE)
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
#endif
}

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
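/*
 * Usage sketch (illustrative; the callee name is hypothetical): a caller
 * brackets a firmware call with the start/end pair so IBRS is set across
 * it and restored afterwards. The pair must run on the same CPU, which
 * is why start() disables preemption and end() re-enables it.
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_runtime_service();
 *	firmware_restrict_branch_speculation_end();
 */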
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	17
#  define RETPOLINE_RAX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	2
#  define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);       /* jmp *%rax */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
# endif
#endif

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
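/*
 * Worked decode of the 17-byte RETPOLINE_RAX_BPF_JIT() sequence above
 * (illustrative; offsets are relative to the first emitted byte and
 * follow directly from the EMIT* lines):
 *
 *   0:  e8 07 00 00 00    callq  0xc        # do_rop = 5 + rel32 of 7
 *   5:  f3 90             pause             # spec_trap
 *   7:  0f ae e8          lfence
 *   a:  eb f9             jmp    0x5        # rel8 of -7 back to spec_trap
 *   c:  48 89 04 24       mov    %rax,(%rsp)
 *  10:  c3                retq
 *
 * The 'mov %rax,(%rsp)' overwrites the return address pushed by the
 * callq, so the retq architecturally transfers control to *%rax while
 * any RSB misprediction for the retq lands in the pause/lfence loop.
 */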