Merge tag 'x86-boot-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 boot code updates from Ingo Molnar:

 - Memblock setup and other early boot code cleanups (Mike Rapoport)

 - Export e820_table_kexec[] to sysfs (Dave Young)

 - Baby steps of adding relocate_kernel() debugging support (David
   Woodhouse)

 - Replace open-coded parity calculation with parity8() (Kuan-Wei Chiu)

 - Move the LA57 trampoline to separate source file (Ard Biesheuvel)

 - Misc micro-optimizations (Uros Bizjak)

 - Drop obsolete E820_TYPE_RESERVED_KERN and related code (Mike
   Rapoport)

* tag 'x86-boot-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kexec: Add relocate_kernel() debugging support: Load a GDT
  x86/boot: Move the LA57 trampoline to separate source file
  x86/boot: Do not test if AC and ID eflags are changeable on x86_64
  x86/bootflag: Replace open-coded parity calculation with parity8()
  x86/bootflag: Micro-optimize sbf_write()
  x86/boot: Add missing has_cpuflag() prototype
  x86/kexec: Export e820_table_kexec[] to sysfs
  x86/boot: Change some static bootflag functions to bool
  x86/e820: Drop obsolete E820_TYPE_RESERVED_KERN and related code
  x86/boot: Split parsing of boot_params into the parse_boot_params() helper function
  x86/boot: Split kernel resources setup into the setup_kernel_resources() helper function
  x86/boot: Move setting of memblock parameters to e820__memblock_setup()
Linus Torvalds committed Mar 25, 2025
2 parents ebfb94d + b25eb5f commit b58386a
Showing 14 changed files with 266 additions and 306 deletions.
1 change: 1 addition & 0 deletions arch/x86/boot/compressed/Makefile
@@ -98,6 +98,7 @@ ifdef CONFIG_X86_64
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
 	vmlinux-objs-y += $(obj)/pgtable_64.o
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+	vmlinux-objs-y += $(obj)/la57toggle.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
103 changes: 0 additions & 103 deletions arch/x86/boot/compressed/head_64.S
@@ -483,110 +483,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
jmp *%rax
SYM_FUNC_END(.Lrelocated)

/*
* This is the 32-bit trampoline that will be copied over to low memory. It
* will be called using the ordinary 64-bit calling convention from code
* running in 64-bit mode.
*
* Return address is at the top of the stack (might be above 4G).
* The first argument (EDI) contains the address of the temporary PGD level
* page table in 32-bit addressable memory which will be programmed into
* register CR3.
*/
.section ".rodata", "a", @progbits
SYM_CODE_START(trampoline_32bit_src)
/*
* Preserve callee save 64-bit registers on the stack: this is
* necessary because the architecture does not guarantee that GPRs will
* retain their full 64-bit values across a 32-bit mode switch.
*/
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbp
pushq %rbx

/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
movq %rsp, %rbx
shrq $32, %rbx

/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
pushq $__KERNEL32_CS
leaq 0f(%rip), %rax
pushq %rax
lretq

/*
* The 32-bit code below will do a far jump back to long mode and end
* up here after reconfiguring the number of paging levels. First, the
* stack pointer needs to be restored to its full 64-bit value before
* the callee save register contents can be popped from the stack.
*/
.Lret:
shlq $32, %rbx
orq %rbx, %rsp

/* Restore the preserved 64-bit registers */
popq %rbx
popq %rbp
popq %r12
popq %r13
popq %r14
popq %r15
retq

.code32
0:
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0

/* Point CR3 to the trampoline's new top level page table */
movl %edi, %cr3

/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
/* Avoid writing EFER if no change was made (for TDX guest) */
jc 1f
wrmsr
1:
/* Toggle CR4.LA57 */
movl %cr4, %eax
btcl $X86_CR4_LA57_BIT, %eax
movl %eax, %cr4

/* Enable paging again. */
movl %cr0, %eax
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0

/*
* Return to the 64-bit calling code using LJMP rather than LRET, to
* avoid the need for a 32-bit addressable stack. The destination
* address will be adjusted after the template code is copied into a
* 32-bit addressable buffer.
*/
.Ljmp: ljmpl $__KERNEL_CS, $(.Lret - trampoline_32bit_src)
SYM_CODE_END(trampoline_32bit_src)

/*
* This symbol is placed right after trampoline_32bit_src() so its address can
* be used to infer the size of the trampoline code.
*/
SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)

/*
* The trampoline code has a size limit.
* Make sure we fail to compile if the trampoline code grows
* beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
*/
.org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE

.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
112 changes: 112 additions & 0 deletions arch/x86/boot/compressed/la57toggle.S
@@ -0,0 +1,112 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include "pgtable.h"

/*
* This is the 32-bit trampoline that will be copied over to low memory. It
* will be called using the ordinary 64-bit calling convention from code
* running in 64-bit mode.
*
* Return address is at the top of the stack (might be above 4G).
* The first argument (EDI) contains the address of the temporary PGD level
* page table in 32-bit addressable memory which will be programmed into
* register CR3.
*/

.section ".rodata", "a", @progbits
SYM_CODE_START(trampoline_32bit_src)
/*
* Preserve callee save 64-bit registers on the stack: this is
* necessary because the architecture does not guarantee that GPRs will
* retain their full 64-bit values across a 32-bit mode switch.
*/
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbp
pushq %rbx

/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
movq %rsp, %rbx
shrq $32, %rbx

/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
pushq $__KERNEL32_CS
leaq 0f(%rip), %rax
pushq %rax
lretq

/*
* The 32-bit code below will do a far jump back to long mode and end
* up here after reconfiguring the number of paging levels. First, the
* stack pointer needs to be restored to its full 64-bit value before
* the callee save register contents can be popped from the stack.
*/
.Lret:
shlq $32, %rbx
orq %rbx, %rsp

/* Restore the preserved 64-bit registers */
popq %rbx
popq %rbp
popq %r12
popq %r13
popq %r14
popq %r15
retq

.code32
0:
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0

/* Point CR3 to the trampoline's new top level page table */
movl %edi, %cr3

/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
/* Avoid writing EFER if no change was made (for TDX guest) */
jc 1f
wrmsr
1:
/* Toggle CR4.LA57 */
movl %cr4, %eax
btcl $X86_CR4_LA57_BIT, %eax
movl %eax, %cr4

/* Enable paging again. */
movl %cr0, %eax
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0

/*
* Return to the 64-bit calling code using LJMP rather than LRET, to
* avoid the need for a 32-bit addressable stack. The destination
* address will be adjusted after the template code is copied into a
* 32-bit addressable buffer.
*/
.Ljmp: ljmpl $__KERNEL_CS, $(.Lret - trampoline_32bit_src)
SYM_CODE_END(trampoline_32bit_src)

/*
* This symbol is placed right after trampoline_32bit_src() so its address can
* be used to infer the size of the trampoline code.
*/
SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)

/*
* The trampoline code has a size limit.
* Make sure we fail to compile if the trampoline code grows
* beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
*/
.org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
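The ljmpl destination above is assembled as an offset relative to trampoline_32bit_src, so the code that copies the trampoline into its 32-bit addressable buffer must rebase that immediate before jumping through it. Below is a minimal sketch of that fix-up step — the install_trampoline() helper and its buf32/code_size parameters are hypothetical illustrations, not the kernel's actual caller, which lives elsewhere in the compressed boot code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern const uint8_t trampoline_32bit_src[];		/* template in .rodata */
extern const uint16_t trampoline_ljmp_imm_offset;	/* offset of ljmpl's imm32 */

static void install_trampoline(uint8_t *buf32, size_t code_size)
{
	uint32_t *fixup;

	/* Copy the trampoline template into 32-bit addressable memory. */
	memcpy(buf32, trampoline_32bit_src, code_size);

	/*
	 * The immediate was assembled as (.Lret - trampoline_32bit_src),
	 * an offset into the template; add the copy's 32-bit address so
	 * the far jump lands back inside the relocated code.
	 */
	fixup = (uint32_t *)(buf32 + trampoline_ljmp_imm_offset);
	*fixup += (uint32_t)(uintptr_t)buf32;
}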
26 changes: 9 additions & 17 deletions arch/x86/boot/cpuflags.c
@@ -28,40 +28,32 @@ static int has_fpu(void)
 	return fsw == 0 && (fcw & 0x103f) == 0x003f;
 }
 
+#ifdef CONFIG_X86_32
 /*
  * For building the 16-bit code we want to explicitly specify 32-bit
  * push/pop operations, rather than just saying 'pushf' or 'popf' and
- * letting the compiler choose. But this is also included from the
- * compressed/ directory where it may be 64-bit code, and thus needs
- * to be 'pushfq' or 'popfq' in that case.
+ * letting the compiler choose.
  */
-#ifdef __x86_64__
-#define PUSHF "pushfq"
-#define POPF "popfq"
-#else
-#define PUSHF "pushfl"
-#define POPF "popfl"
-#endif
-
-int has_eflag(unsigned long mask)
+bool has_eflag(unsigned long mask)
 {
 	unsigned long f0, f1;
 
-	asm volatile(PUSHF "	\n\t"
-		     PUSHF "	\n\t"
+	asm volatile("pushfl	\n\t"
+		     "pushfl	\n\t"
 		     "pop %0	\n\t"
 		     "mov %0,%1	\n\t"
 		     "xor %2,%1	\n\t"
 		     "push %1	\n\t"
-		     POPF "	\n\t"
-		     PUSHF "	\n\t"
+		     "popfl	\n\t"
+		     "pushfl	\n\t"
 		     "pop %1	\n\t"
-		     POPF
+		     "popfl"
 		     : "=&r" (f0), "=&r" (f1)
 		     : "ri" (mask));
 
 	return !!((f0^f1) & mask);
 }
+#endif
 
 void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d)
 {
7 changes: 6 additions & 1 deletion arch/x86/boot/cpuflags.h
@@ -15,8 +15,13 @@ struct cpu_features {
 extern struct cpu_features cpu;
 extern u32 cpu_vendor[3];
 
-int has_eflag(unsigned long mask);
+#ifdef CONFIG_X86_32
+bool has_eflag(unsigned long mask);
+#else
+static inline bool has_eflag(unsigned long mask) { return true; }
+#endif
 void get_cpuflags(void);
 void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d);
+bool has_cpuflag(int flag);
 
 #endif
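The stub in the #else branch is exact, not an approximation: has_eflag() probes whether an EFLAGS bit can be toggled — the ID bit (21) signals CPUID support and AC (18) distinguishes a 486 from a 386 — and any CPU executing 64-bit code has both, so there is nothing to test. A hedged sketch of the effect on callers; the X86_EFLAGS_* constants below are the architectural bit positions, redefined here only for illustration:

#include <stdbool.h>

#define X86_EFLAGS_AC	(1UL << 18)	/* alignment-check flag */
#define X86_EFLAGS_ID	(1UL << 21)	/* CPUID-detection flag */

#ifdef __x86_64__
/* Long mode guarantees both bits are changeable: constant-fold the probe. */
static inline bool has_eflag(unsigned long mask)
{
	(void)mask;
	return true;
}
#else
bool has_eflag(unsigned long mask);	/* the asm probe from cpuflags.c */
#endif

bool cpu_has_cpuid(void)
{
	/* On x86_64 this whole function compiles down to 'return true'. */
	return has_eflag(X86_EFLAGS_ID);
}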
1 change: 0 additions & 1 deletion arch/x86/include/asm/e820/api.h
@@ -29,7 +29,6 @@ extern unsigned long e820__end_of_low_ram_pfn(void);
 extern u64 e820__memblock_alloc_reserved(u64 size, u64 align);
 extern void e820__memblock_setup(void);
 
-extern void e820__reserve_setup_data(void);
 extern void e820__finish_early_params(void);
 extern void e820__reserve_resources(void);
 extern void e820__reserve_resources_late(void);
9 changes: 0 additions & 9 deletions arch/x86/include/asm/e820/types.h
@@ -35,15 +35,6 @@ enum e820_type {
 	 * marking it with the IORES_DESC_SOFT_RESERVED designation.
 	 */
 	E820_TYPE_SOFT_RESERVED	= 0xefffffff,
-
-	/*
-	 * Reserved RAM used by the kernel itself if
-	 * CONFIG_INTEL_TXT=y is enabled, memory of this type
-	 * will be included in the S3 integrity calculation
-	 * and so should not include any memory that the BIOS
-	 * might alter over the S3 transition:
-	 */
-	E820_TYPE_RESERVED_KERN	= 128,
 };
 
 /*
29 changes: 8 additions & 21 deletions arch/x86/kernel/bootflag.c
@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/acpi.h>
+#include <linux/bitops.h>
 #include <asm/io.h>
 
 #include <linux/mc146818rtc.h>
@@ -20,27 +21,13 @@

 int sbf_port __initdata = -1;	/* set via acpi_boot_init() */
 
-static int __init parity(u8 v)
-{
-	int x = 0;
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		x ^= (v & 1);
-		v >>= 1;
-	}
-
-	return x;
-}
-
 static void __init sbf_write(u8 v)
 {
 	unsigned long flags;
 
 	if (sbf_port != -1) {
-		v &= ~SBF_PARITY;
-		if (!parity(v))
-			v |= SBF_PARITY;
+		if (!parity8(v))
+			v ^= SBF_PARITY;
 
 		printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n",
 			sbf_port, v);
Expand All @@ -66,14 +53,14 @@ static u8 __init sbf_read(void)
 	return v;
 }
 
-static int __init sbf_value_valid(u8 v)
+static bool __init sbf_value_valid(u8 v)
 {
 	if (v & SBF_RESERVED)		/* Reserved bits */
-		return 0;
-	if (!parity(v))
-		return 0;
+		return false;
+	if (!parity8(v))
+		return false;
 
-	return 1;
+	return true;
 }
 
 static int __init sbf_init(void)
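The parity() helper this patch removes folds all eight bits together with XOR, i.e. it computes even/odd bit parity, which is exactly what the generic parity8() helper provides. A self-contained sketch (not kernel code) comparing the removed loop with the usual branch-free XOR-fold that constant-time parity helpers are built on, verified over every byte value:

#include <stdint.h>
#include <stdio.h>

/* The open-coded loop removed from bootflag.c: XOR of all eight bits. */
static int parity_loop(uint8_t v)
{
	int x = 0;
	int i;

	for (i = 0; i < 8; i++) {
		x ^= (v & 1);
		v >>= 1;
	}
	return x;
}

/* The usual XOR-fold behind helpers such as parity8(). */
static int parity_fold(uint8_t v)
{
	v ^= v >> 4;	/* fold high nibble into low nibble */
	v ^= v >> 2;	/* fold four bits into two */
	v ^= v >> 1;	/* ...and two into one */
	return v & 1;
}

int main(void)
{
	int v;

	for (v = 0; v < 256; v++)
		if (parity_loop((uint8_t)v) != parity_fold((uint8_t)v))
			printf("mismatch at 0x%02x\n", v);
	return 0;
}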
[Diffs for the remaining six changed files are truncated in this view.]
