Merge tag 'x86_urgent_for_v6.13_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Add a terminating zero end-element to the array describing AMD CPUs
   affected by erratum 1386 so that the matching loop actually
   terminates instead of going off into the weeds

 - Update the boot protocol documentation to mention the fact that the
   preferred address to load the kernel to is considered in the
   relocatable kernel case too

 - Flush the memory buffer containing the microcode patch after applying
   microcode on AMD Zen1 and Zen2, to avoid unnecessary slowdowns

 - Make sure the PPIN CPU feature flag is cleared on all CPUs if PPIN
   has been disabled

* tag 'x86_urgent_for_v6.13_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/CPU/AMD: Terminate the erratum_1386_microcode array
  x86/Documentation: Update algo in init_size description of boot protocol
  x86/microcode/AMD: Flush patch buffer mapping after application
  x86/mm: Carve out INVLPG inline asm for use by others
  x86/cpu: Fix PPIN initialization
Linus Torvalds committed Dec 1, 2024
2 parents 9022ed0 + ff6cdc4 commit 58ac609
Showing 6 changed files with 41 additions and 11 deletions.
17 changes: 13 additions & 4 deletions Documentation/arch/x86/boot.rst
@@ -896,10 +896,19 @@ Offset/size: 0x260/4

 The kernel runtime start address is determined by the following algorithm::

-	if (relocatable_kernel)
-		runtime_start = align_up(load_address, kernel_alignment)
-	else
-		runtime_start = pref_address
+	if (relocatable_kernel) {
+		if (load_address < pref_address)
+			load_address = pref_address;
+		runtime_start = align_up(load_address, kernel_alignment);
+	} else {
+		runtime_start = pref_address;
+	}
+
+Hence the necessary memory window location and size can be estimated by
+a boot loader as::
+
+	memory_window_start = runtime_start;
+	memory_window_size = init_size;

 ============ ===============
 Field name:  handover_offset
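The documentation change spells out that pref_address participates even in the relocatable case: a load address below pref_address is bumped up to it before alignment is applied. As a rough illustration from a boot loader's point of view, a minimal C sketch of the documented calculation could look like the following (the setup_header_fields struct and estimate_memory_window() are made up for this example; only the field names come from the boot protocol text):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: field names follow the boot protocol text above,
 * not a real struct setup_header definition. */
struct setup_header_fields {
	uint8_t  relocatable_kernel;
	uint64_t pref_address;
	uint32_t kernel_alignment;
	uint32_t init_size;
};

static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Returns memory_window_start; *size receives memory_window_size. */
static uint64_t estimate_memory_window(const struct setup_header_fields *hdr,
				       uint64_t load_address, uint64_t *size)
{
	uint64_t runtime_start;

	if (hdr->relocatable_kernel) {
		if (load_address < hdr->pref_address)
			load_address = hdr->pref_address;
		runtime_start = align_up(load_address, hdr->kernel_alignment);
	} else {
		runtime_start = hdr->pref_address;
	}

	*size = hdr->init_size;
	return runtime_start;
}

int main(void)
{
	struct setup_header_fields hdr = {
		.relocatable_kernel = 1,
		.pref_address       = 0x1000000,	/* 16 MiB */
		.kernel_alignment   = 0x200000,		/* 2 MiB */
		.init_size          = 0x1383000,
	};
	uint64_t size;
	uint64_t start = estimate_memory_window(&hdr, 0x100000, &size);

	/* A 1 MiB load address is below pref_address, so the window starts
	 * at the (already aligned) preferred address. */
	printf("window: start=0x%llx size=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}

A loader would then reserve memory_window_size bytes starting at the returned address before handing control to the kernel.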
4 changes: 4 additions & 0 deletions arch/x86/include/asm/tlb.h
@@ -34,4 +34,8 @@ static inline void __tlb_remove_table(void *table)
 	free_page_and_swap_cache(table);
 }
 
+static inline void invlpg(unsigned long addr)
+{
+	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+}
 #endif /* _ASM_X86_TLB_H */
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/amd.c
@@ -798,6 +798,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 static const struct x86_cpu_desc erratum_1386_microcode[] = {
 	AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e),
 	AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
+	{},
 };
 
 static void fix_erratum_1386(struct cpuinfo_x86 *c)
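The added {} entry is a zero-filled terminator: the table is walked until an all-zero descriptor is reached, so without it the matching loop reads past the end of the array. A simplified, stand-alone illustration of that pattern (not the kernel's actual x86_cpu_desc matcher; struct cpu_desc and find_desc() are invented for the example):

#include <stdio.h>

/* Simplified stand-in for struct x86_cpu_desc. */
struct cpu_desc {
	unsigned char family, model, stepping;
	unsigned int  min_ucode_rev;
};

static const struct cpu_desc erratum_1386_table[] = {
	{ 0x17, 0x01, 0x2, 0x0800126e },
	{ 0x17, 0x31, 0x0, 0x08301052 },
	{},	/* terminator: the all-zero entry stops the walk below */
};

static const struct cpu_desc *find_desc(unsigned char fam, unsigned char model,
					unsigned char stepping)
{
	const struct cpu_desc *d;

	/* Without the trailing {}, this loop would run past the array. */
	for (d = erratum_1386_table; d->family; d++)
		if (d->family == fam && d->model == model && d->stepping == stepping)
			return d;

	return NULL;
}

int main(void)
{
	const struct cpu_desc *d = find_desc(0x17, 0x31, 0x0);

	if (d)
		printf("affected, fixed in microcode 0x%08x\n", d->min_ucode_rev);
	else
		printf("not in the table\n");
	return 0;
}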
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/common.c
@@ -169,7 +169,7 @@ static void ppin_init(struct cpuinfo_x86 *c)
 	}
 
 clear_ppin:
-	clear_cpu_cap(c, info->feature);
+	setup_clear_cpu_cap(info->feature);
 }
 
 static void default_init(struct cpuinfo_x86 *c)
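The one-line change matters because the two helpers differ in scope: clear_cpu_cap() clears the feature bit only in the cpuinfo of the CPU being initialized, whereas setup_clear_cpu_cap() records the decision globally so the flag ends up cleared on every CPU, matching the summary above. A rough conceptual model of that distinction, purely illustrative and not the kernel's implementation (cpu_state, forced_clear and the helpers below are invented):

#include <stdbool.h>
#include <stdio.h>

#define NR_FEATURES  64
#define FEATURE_PPIN 7		/* arbitrary bit number for the illustration */

struct cpu_state {
	bool cap[NR_FEATURES];
};

static bool forced_clear[NR_FEATURES];	/* analogous in spirit to a global "cleared" mask */

/* a per-CPU clear: only this CPU's view changes */
static void per_cpu_clear(struct cpu_state *c, int feature)
{
	c->cap[feature] = false;
}

/* a recorded clear: every CPU brought up afterwards honours it */
static void setup_clear(int feature)
{
	forced_clear[feature] = true;
}

static void bring_up_cpu(struct cpu_state *c)
{
	for (int f = 0; f < NR_FEATURES; f++) {
		c->cap[f] = true;	/* pretend the hardware reports the feature */
		if (forced_clear[f])
			c->cap[f] = false;
	}
}

int main(void)
{
	struct cpu_state cpu0 = { 0 }, cpu1 = { 0 };

	bring_up_cpu(&cpu0);
	bring_up_cpu(&cpu1);
	per_cpu_clear(&cpu0, FEATURE_PPIN);
	printf("per-cpu clear: cpu0=%d cpu1=%d\n",
	       cpu0.cap[FEATURE_PPIN], cpu1.cap[FEATURE_PPIN]);	/* 0 1 */

	setup_clear(FEATURE_PPIN);
	bring_up_cpu(&cpu0);
	bring_up_cpu(&cpu1);
	printf("setup clear:   cpu0=%d cpu1=%d\n",
	       cpu0.cap[FEATURE_PPIN], cpu1.cap[FEATURE_PPIN]);	/* 0 0 */
	return 0;
}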
25 changes: 20 additions & 5 deletions arch/x86/kernel/cpu/microcode/amd.c
@@ -34,6 +34,7 @@
 #include <asm/setup.h>
 #include <asm/cpu.h>
 #include <asm/msr.h>
+#include <asm/tlb.h>
 
 #include "internal.h"
 
@@ -483,11 +484,25 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
 	}
 }
 
-static int __apply_microcode_amd(struct microcode_amd *mc)
+static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
 {
+	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
 	u32 rev, dummy;
 
-	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
+	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
+
+	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
+		unsigned long p_addr_end = p_addr + psize - 1;
+
+		invlpg(p_addr);
+
+		/*
+		 * Flush next page too if patch image is crossing a page
+		 * boundary.
+		 */
+		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
+			invlpg(p_addr_end);
+	}
 
 	/* verify patch application was successful */
 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -529,7 +544,7 @@ static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
 	if (old_rev > mc->hdr.patch_id)
 		return ret;
 
-	return !__apply_microcode_amd(mc);
+	return !__apply_microcode_amd(mc, desc.psize);
 }
 
 static bool get_builtin_microcode(struct cpio_data *cp)
@@ -745,7 +760,7 @@ void reload_ucode_amd(unsigned int cpu)
 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
 	if (rev < mc->hdr.patch_id) {
-		if (!__apply_microcode_amd(mc))
+		if (!__apply_microcode_amd(mc, p->size))
 			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
 	}
 }
@@ -798,7 +813,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
 		goto out;
 	}
 
-	if (__apply_microcode_amd(mc_amd)) {
+	if (__apply_microcode_amd(mc_amd, p->size)) {
 		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
 		       cpu, mc_amd->hdr.patch_id);
 		return UCODE_ERROR;
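The new psize argument is only used to locate the last byte of the patch image (p_addr_end = p_addr + psize - 1); comparing the page numbers of the first and last byte decides whether a second INVLPG is needed. A small stand-alone check of the same arithmetic, with a hard-coded 4 KiB page size standing in for the kernel's PAGE_SHIFT:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT_4K 12	/* local stand-in for PAGE_SHIFT */

static bool crosses_page(unsigned long addr, unsigned int size)
{
	unsigned long end = addr + size - 1;

	/* different page numbers for first and last byte => two flushes */
	return (addr >> PAGE_SHIFT_4K) != (end >> PAGE_SHIFT_4K);
}

int main(void)
{
	/* 3200-byte image starting 1 KiB below a page boundary: crosses */
	printf("%d\n", crosses_page(0x7c00, 3200));	/* prints 1 */
	/* same image starting at a page boundary: fits in one page */
	printf("%d\n", crosses_page(0x8000, 3200));	/* prints 0 */
	return 0;
}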
3 changes: 2 additions & 1 deletion arch/x86/mm/tlb.c
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/apic.h>
 #include <asm/perf_event.h>
+#include <asm/tlb.h>
 
 #include "mm_internal.h"
 
@@ -1140,7 +1141,7 @@ STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
 	bool cpu_pcide;
 
 	/* Flush 'addr' from the kernel PCID: */
-	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+	invlpg(addr);
 
 	/* If PTI is off there is no user PCID and nothing to flush. */
 	if (!static_cpu_has(X86_FEATURE_PTI))
