s390/mm: Restore mapping of kernel image using large pages
Since the physical and virtual kernel address spaces were uncoupled,
the kernel image is no longer mapped using large segment pages,
which is a regression.

Put the kernel image at the same large segment page offset in
physical memory as in virtual memory. This approach preserves
the existing number of bits of entropy used for randomization
of the kernel location in virtual memory when KASLR is on.
As a result, the kernel is mapped using large segment pages.

Fixes: c98d2ec ("s390/mm: Uncouple physical vs virtual address spaces")
Reported-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Alexander Gordeev authored and Vasily Gorbik committed Jun 11, 2024
1 parent d8073dc commit 693d41f
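
The arithmetic behind the fix can be illustrated with a small standalone sketch (illustrative values only; the kernel's actual constants live in the arch/s390 headers):

```c
#include <assert.h>
#include <stdint.h>

/* On s390 a segment (PMD-level large) page covers 1 MiB. */
#define SEGMENT_SIZE	(1UL << 20)

int main(void)
{
	/* Hypothetical KASLR result: a THREAD_SIZE-aligned virtual base. */
	uint64_t virt_base = 0x2345678c000UL;
	/* Hypothetical segment-aligned physical base. */
	uint64_t phys_base = 0x4500000UL;

	/* Carry the in-segment offset of the virtual base over to the
	 * physical base, as the fix does for __kaslr_offset_phys. */
	phys_base |= virt_base & (SEGMENT_SIZE - 1);

	/* Virtual and physical addresses now agree modulo 1 MiB, which is
	 * the precondition for mapping the image with segment pages. */
	assert(virt_base % SEGMENT_SIZE == phys_base % SEGMENT_SIZE);
	return 0;
}
```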
Showing 3 changed files with 26 additions and 4 deletions.
27 changes: 24 additions & 3 deletions arch/s390/boot/startup.c
@@ -384,7 +384,7 @@ static void fixup_vmlinux_info(void)
 void startup_kernel(void)
 {
 	unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
-	unsigned long nokaslr_offset_phys = mem_safe_offset();
+	unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
 	unsigned long amode31_lma = 0;
 	unsigned long max_physmem_end;
 	unsigned long asce_limit;
@@ -393,6 +393,12 @@ void startup_kernel(void)
 
 	fixup_vmlinux_info();
 	setup_lpp();
+
+	/*
+	 * Non-randomized kernel physical start address must be _SEGMENT_SIZE
+	 * aligned (see below).
+	 */
+	nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
 	safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
 
 	/*
@@ -425,10 +431,25 @@ void startup_kernel(void)
 	save_ipl_cert_comp_list();
 	rescue_initrd(safe_addr, ident_map_size);
 
-	if (kaslr_enabled())
-		__kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
+	/*
+	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
+	 * 20 bits (the offset within a large page) are zero. Copy the last
+	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
+	 * __kaslr_offset_phys.
+	 *
+	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
+	 * are identical, which is required to allow for large mappings of the
+	 * kernel image.
+	 */
+	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
+	if (kaslr_enabled()) {
+		unsigned long end = ident_map_size - kaslr_large_page_offset;
+
+		__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
+	}
 	if (!__kaslr_offset_phys)
 		__kaslr_offset_phys = nokaslr_offset_phys;
+	__kaslr_offset_phys |= kaslr_large_page_offset;
 	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
 	physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
 	deploy_kernel((void *)__kaslr_offset_phys);
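Why randomize_within_range() is bounded by ident_map_size minus kaslr_large_page_offset can be checked with a quick sketch (made-up numbers, not kernel code): the sub-segment offset that is ORed in afterwards must not push the image past the identity-mapped area.

```c
#include <assert.h>
#include <stdint.h>

#define SEGMENT_SIZE	(1UL << 20)
#define SEGMENT_MASK	(~(SEGMENT_SIZE - 1))

int main(void)
{
	uint64_t ident_map_size = 4UL << 30;	/* example: 4 GiB */
	uint64_t kernel_size = 64UL << 20;	/* example: 64 MiB */
	uint64_t large_page_offset = 0x8c000UL;	/* example in-segment offset */

	/* Upper bound handed to randomize_within_range(): shrunk by the
	 * offset that will be ORed in afterwards. */
	uint64_t end = ident_map_size - large_page_offset;

	/* Worst case: the randomizer picks the highest segment-aligned
	 * base that still fits kernel_size below 'end'. */
	uint64_t phys = (end - kernel_size) & SEGMENT_MASK;
	phys |= large_page_offset;

	/* Even then the image stays inside the identity mapping. */
	assert(phys + kernel_size <= ident_map_size);
	return 0;
}
```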
2 changes: 1 addition & 1 deletion arch/s390/boot/vmem.c
@@ -261,7 +261,7 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
 
 static bool large_allowed(enum populate_mode mode)
 {
-	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY);
+	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
 }
 
 static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
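Adding POPULATE_KERNEL to large_allowed() only pays off because helpers such as can_large_pud() above also check that the virtual range and its physical backing share segment alignment. A simplified sketch of that condition (hypothetical name, not the file's actual helper):

```c
#include <stdbool.h>
#include <stdint.h>

#define SEGMENT_SIZE	(1UL << 20)

/* Simplified sketch: a segment page may only be used when the virtual
 * range spans a whole segment and the backing physical address is
 * equally aligned. Because __kaslr_offset and __kaslr_offset_phys now
 * share their low 20 bits, the kernel image satisfies this. */
bool may_use_segment_page(uint64_t virt, uint64_t end, uint64_t phys)
{
	return !(virt & (SEGMENT_SIZE - 1)) &&
	       !(phys & (SEGMENT_SIZE - 1)) &&
	       end - virt >= SEGMENT_SIZE;
}
```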
1 change: 1 addition & 0 deletions arch/s390/boot/vmlinux.lds.S
@@ -109,6 +109,7 @@ SECTIONS
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
 	. = ALIGN(PAGE_SIZE);
 	. += AMODE31_SIZE;	/* .amode31 section */
+	. = ALIGN(1 << 20);	/* _SEGMENT_SIZE */
 #else
 	. = ALIGN(8);
 #endif