mariux64/linux @ c0dd924
arch/x86/kernel/cpu/microcode/core.c — 680 lines (548 loc) · 14.9 KB
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		      2006 Shaohua Li <shaohua.li@intel.com>
 *		 2013-2016 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#define DRIVER_VERSION	"2.2"

static struct microcode_ops	*microcode_ops;
static bool dis_ucode_ldr = true;

bool initrd_gone;

LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non-cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single CPU is being
 * updated at any given time.
 */
static DEFINE_MUTEX(microcode_mutex);

struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32))
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}

static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set,
	 * but that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return *res;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return *res;
	}

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}

void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp();
	else
		load_ucode_amd_bsp(cpuid_1_eax);
}

static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}

void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (check_loader_disabled_ap())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ret = -EINVAL;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (c->x86 >= 6)
			ret = save_microcode_in_initrd_intel();
		break;
	case X86_VENDOR_AMD:
		if (c->x86 >= 0x10)
			ret = save_microcode_in_initrd_amd(cpuid_eax(1));
		break;
	default:
		break;
	}

	initrd_gone = true;

	return ret;
}

struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *params;

	if (use_pa)
		params = (struct boot_params *)__pa_nodebug(&boot_params);
	else
		params = &boot_params;

	size = params->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	if (size)
		start = params->hdr.ramdisk_image;

# else /* CONFIG_X86_64 */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;

		start += PAGE_OFFSET;
	}
# endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 *
	 * initrd_gone is for the hotplug case where we've thrown out initrd
	 * already.
	 */
	if (!use_pa) {
		if (initrd_gone)
			return (struct cpio_data){ NULL, 0, "" };
		if (initrd_start)
			start = initrd_start;
	} else {
		/*
		 * The picture with physical addresses is a bit different: we
		 * need to get the *physical* address to which the ramdisk was
		 * relocated, i.e., relocated_ramdisk (not initrd_start) and
		 * since we're running from physical addresses, we need to access
		 * relocated_ramdisk through its *physical* address too.
		 */
		u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);

		if (*rr)
			start = *rr;
	}

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}

void reload_early_microcode(void)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd();
		break;
	default:
		break;
	}
}

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stop_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
#define SPINUNIT 100 /* 100 nsec */

static int check_online_cpus(void)
{
	unsigned int cpu;

	/*
	 * Make sure all CPUs are online.  It's fine for SMT to be disabled if
	 * all the primary threads are still online.
	 */
	for_each_present_cpu(cpu) {
		if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
			pr_err("Not all CPUs online, aborting microcode update.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static atomic_t late_cpus_in;
static atomic_t late_cpus_out;

static int __wait_for_cpus(atomic_t *t, long long timeout)
{
	int all_cpus = num_online_cpus();

	atomic_inc(t);

	while (atomic_read(t) < all_cpus) {
		if (timeout < SPINUNIT) {
			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
				all_cpus - atomic_read(t));
			return 1;
		}

		ndelay(SPINUNIT);
		timeout -= SPINUNIT;

		touch_nmi_watchdog();
	}
	return 0;
}

/*
 * Returns:
 * < 0 - on error
 *   0 - success (no update done or microcode was updated)
 */
static int __reload_late(void *info)
{
	int cpu = smp_processor_id();
	enum ucode_state err;
	int ret = 0;

	/*
	 * Wait for all CPUs to arrive. A load will not be attempted unless all
	 * CPUs show up.
	 */
	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
		return -1;

	/*
	 * On an SMT system, it suffices to load the microcode on one sibling of
	 * the core because the microcode engine is shared between the threads.
	 * Synchronization still needs to take place so that no concurrent
	 * loading attempts happen on multiple threads of an SMT core. See
	 * below.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
		err = microcode_ops->apply_microcode(cpu);
	else
		goto wait_for_siblings;

	if (err >= UCODE_NFOUND) {
		if (err == UCODE_ERROR)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);

		ret = -1;
	}

wait_for_siblings:
	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
		panic("Timeout during microcode update!\n");

	/*
	 * At least one thread has completed update on each core.
	 * For others, simply call the update to make sure the
	 * per-cpu cpuinfo can be updated with right microcode
	 * revision.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
		err = microcode_ops->apply_microcode(cpu);

	return ret;
}

/*
 * Reload microcode late on all CPUs. Wait for one second until they
 * all gather together.
 */
static int microcode_reload_late(void)
{
	int old = boot_cpu_data.microcode, ret;
	struct cpuinfo_x86 prev_info;

	pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
	pr_err("You should switch to early loading, if possible.\n");

	atomic_set(&late_cpus_in,  0);
	atomic_set(&late_cpus_out, 0);

	/*
	 * Take a snapshot before the microcode update in order to compare and
	 * check whether any bits changed after an update.
	 */
	store_cpu_caps(&prev_info);

	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
	if (ret == 0)
		microcode_check(&prev_info);

	pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
		old, boot_cpu_data.microcode);

	return ret;
}

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	enum ucode_state tmp_ret = UCODE_OK;
	int bsp = boot_cpu_data.cpu_index;
	unsigned long val;
	ssize_t ret = 0;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return size;

	cpus_read_lock();

	ret = check_online_cpus();
	if (ret)
		goto put;

	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev);
	if (tmp_ret != UCODE_NEW)
		goto put;

	mutex_lock(&microcode_mutex);
	ret = microcode_reload_late();
	mutex_unlock(&microcode_mutex);

put:
	cpus_read_unlock();

	if (ret == 0)
		ret = size;

	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);

	return ret;
}

static DEVICE_ATTR_WO(reload);
#endif

static ssize_t version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs			= mc_default_attrs,
	.name			= "microcode",
};

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

static enum ucode_state microcode_init_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	memset(uci, 0, sizeof(*uci));

	microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);

	return microcode_ops->apply_microcode(cpu);
}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->mc)
		microcode_ops->apply_microcode(cpu);
	else
		reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
	.resume	= microcode_bsp_resume,
};

static int mc_cpu_starting(unsigned int cpu)
{
	enum ucode_state err = microcode_ops->apply_microcode(cpu);

	pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err);

	return err == UCODE_ERROR;
}

static int mc_cpu_online(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	microcode_fini_cpu(cpu);

	/* Suspend is in progress, only remove the interface */
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	pr_debug("%s: CPU%d\n", __func__, cpu);

	return 0;
}

static void setup_online_cpu(struct work_struct *work)
{
	int cpu = smp_processor_id();
	enum ucode_state err;

	err = microcode_init_cpu(cpu);
	if (err == UCODE_ERROR) {
		pr_err("Error applying microcode on CPU%d\n", cpu);
		return;
	}

	mc_cpu_online(cpu);
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

static int __init microcode_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	error = sysfs_create_group(&cpu_subsys.dev_root->kobj, &cpu_root_microcode_group);
	if (error) {
		pr_err("Error creating microcode group!\n");
		goto out_pdev;
	}

	/* Do per-CPU setup */
	schedule_on_each_cpu(setup_online_cpu);

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
				  mc_cpu_starting, NULL);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
				  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

	return 0;

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;
}
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);
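
/*
 * Illustrative only -- not part of core.c. A minimal user-space sketch of
 * driving the late-loading interface implemented by reload_store() above.
 * The "reload" attribute registered via cpu_root_microcode_group appears at
 * /sys/devices/system/cpu/microcode/reload, and the per-CPU "version"
 * attribute from mc_attr_group at
 * /sys/devices/system/cpu/cpu0/microcode/version. Writing "1" requests a
 * reload; any other value is accepted and ignored (see reload_store()), and
 * errors such as -EINVAL from check_online_cpus() surface as a failed
 * write(2). Requires root and CONFIG_MICROCODE_LATE_LOADING=y.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void show_revision(const char *when)
{
	char buf[32] = "";
	int fd = open("/sys/devices/system/cpu/cpu0/microcode/version", O_RDONLY);

	if (fd < 0)
		return;
	/* version_show() formats the revision as "0x%x\n" */
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("%s: CPU0 microcode revision %s", when, buf);
	close(fd);
}

int main(void)
{
	int fd;

	show_revision("before");

	fd = open("/sys/devices/system/cpu/microcode/reload", O_WRONLY);
	if (fd < 0) {
		/* absent when CONFIG_MICROCODE_LATE_LOADING is not set */
		perror("open");
		return 1;
	}

	/* reload_store() parses the buffer with kstrtoul() and acts only on 1 */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	show_revision("after");
	return 0;
}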