s390/boot: Add startup debugging support
Add boot_debug() calls to log various memory layout decisions and
randomization details during early startup, improving debugging
capabilities.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Vasily Gorbik authored and Alexander Gordeev committed Jan 26, 2025
1 parent 418b4d5 commit ec6f9f7
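
The #define boot_fmt(fmt) "startup: " fmt added at the top of startup.c (first hunk below) follows the same idea as the kernel's pr_fmt() convention: the file sets a message prefix once and every boot_debug() call in the file picks it up. The following minimal, self-contained sketch only illustrates that pattern; boot_dbg() and its printf() backend are hypothetical stand-ins, not the real boot_debug()/boot_printk() helpers from arch/s390/boot, which differ in detail (loglevels, early console output).

/* Illustrative sketch of the pr_fmt()-style prefixing used by boot_debug(). */
#include <stdio.h>

/* Per-file prefix, defined before the printing helpers are pulled in. */
#define boot_fmt(fmt)	"startup: " fmt

/* Hypothetical stand-in for boot_debug(): pastes the prefix onto every message. */
#define boot_dbg(fmt, ...)	printf(boot_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	unsigned long ident_map_size = 0x20000000000UL;	/* made-up value */

	/* Prints: "startup: Identity map size: 0x0000020000000000" */
	boot_dbg("Identity map size: 0x%016lx\n", ident_map_size);
	return 0;
}
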
Showing 1 changed file with 26 additions and 2 deletions.
arch/s390/boot/startup.c
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#define boot_fmt(fmt) "startup: " fmt
 #include <linux/string.h>
 #include <linux/elf.h>
 #include <asm/page-states.h>
@@ -223,12 +224,16 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
 	if (oldmem_data.start) {
 		__kaslr_enabled = 0;
 		ident_map_size = min(ident_map_size, oldmem_data.size);
+		boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
 	} else if (ipl_block_valid && is_ipl_block_dump()) {
 		__kaslr_enabled = 0;
-		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
+		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
 			ident_map_size = min(ident_map_size, hsa_size);
+			boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
+		}
 	}
 #endif
+	boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
 }
 
 #define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
@@ -266,6 +271,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
 	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
+	boot_debug("vmem size estimated: 0x%016lx\n", vsize);
 	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
 	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
 		asce_limit = _REGION1_SIZE;
@@ -289,8 +295,10 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	 * otherwise asce_limit and rte_size would have been adjusted.
 	 */
 	vmax = adjust_to_uv_max(asce_limit);
+	boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
 #ifdef CONFIG_KASAN
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
+	boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
@@ -304,19 +312,27 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 			pos = 0;
 		kernel_end = vmax - pos * THREAD_SIZE;
 		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
+		boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
+		boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
+			   kernel_start + kernel_size);
 	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
 		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
-		boot_debug("The kernel base address is forced to %lx\n", kernel_start);
+		boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
+			   kernel_start + kernel_size);
 	} else {
 		kernel_start = __NO_KASLR_START_KERNEL;
+		boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
+			   kernel_start + kernel_size);
 	}
 	__kaslr_offset = kernel_start;
+	boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);
 
 	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
 	if (IS_ENABLED(CONFIG_KMSAN))
 		VMALLOC_END -= MODULES_LEN * 2;
+	boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);
 
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
 	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
@@ -328,10 +344,15 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 		VMALLOC_END -= vmalloc_size * 2;
 	}
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
+	boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);
 
 	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+	boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
+		   __memcpy_real_area + MEMCPY_REAL_SIZE);
 	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
 				   sizeof(struct lowcore));
+	boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
+		   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);
 
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
 	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
@@ -353,6 +374,8 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	max_mappable = min(max_mappable, vmemmap_start);
 	if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
 		__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+	boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
+		   __identity_base + ident_map_size);
 
 	return asce_limit;
 }
@@ -542,5 +565,6 @@ void startup_kernel(void)
 	 */
 	psw.addr = __kaslr_offset + vmlinux.entry;
 	psw.mask = PSW_KERNEL_BITS;
+	boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
 	__load_psw(psw);
 }
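
For reference, with boot debug output enabled the new calls would print lines roughly like the ones below. The addresses are made-up placeholders, and the "startup: " prefix assumes boot_fmt() is applied by boot_debug() as sketched above; actual values depend on the machine, the configuration and randomization.

startup: Identity map size: 0x0000020000000000
startup: vmem size estimated: 0x0000000480000000
startup: 3 level paging 0x0000040000000000 vmax
startup: kernel image: 0x000003ffe0000000-0x000003ffe1000000 (kaslr)
startup: __kaslr_offset: 0x000003ffe0000000
startup: Starting kernel at: 0x000003ffe0010000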
