s390/boot: Swap vmalloc and Lowcore/Real Memory Copy areas
This is a preparatory rework to allow uncoupling the virtual
and physical address spaces.

Currently the order of the virtual memory areas is (the lowcore
and .amode31 sections are skipped, as they are irrelevant here):

	identity mapping (the kernel is contained within)
	vmemmap
	vmalloc
	modules
	Absolute Lowcore
	Real Memory Copy

In the future the kernel will be mapped separately and placed
at the end of the virtual address space, so the layout would
look like this:

	identity mapping
	vmemmap
	vmalloc
	modules
	Absolute Lowcore
	Real Memory Copy
	kernel

However, the distance between the kernel and modules needs to
be as small as possible, ideally zero. Thus, the Absolute Lowcore
and Real Memory Copy areas would be in the way and therefore
need to be moved as well:

	identity mapping
	vmemmap
	Absolute Lowcore
	Real Memory Copy
	vmalloc
	modules
	kernel

To facilitate such a layout, swap the vmalloc area with the
Absolute Lowcore and Real Memory Copy areas. As a result, the
current layout turns into:

	identity mapping (the kernel is contained within)
	vmemmap
	Absolute Lowcore
	Real Memory Copy
	vmalloc
	modules

This will allow the kernel to be located directly next to the
modules once it gets mapped separately.
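
For illustration, a minimal user-space sketch of the new top-down
carve-out order. All constants are made-up stand-ins for the kernel's
_SEGMENT_SIZE, MODULES_LEN, MEMCPY_REAL_SIZE and ABS_LOWCORE_MAP_SIZE;
the real setup_kernel_memory_layout() additionally caps vmalloc_size
and applies PAGE_SIZE and lowcore-sized alignment. Only the ordering
of the areas matters here.

	#include <stdio.h>

	#define SEGMENT_SIZE		(1UL << 20)	/* stand-in for _SEGMENT_SIZE */
	#define MODULES_LEN		(512UL << 20)	/* stand-in */
	#define MEMCPY_REAL_SIZE	(1UL << 20)	/* stand-in */
	#define ABS_LOWCORE_MAP_SIZE	(1UL << 20)	/* stand-in */

	static unsigned long round_down_to(unsigned long addr, unsigned long align)
	{
		return addr & ~(align - 1);	/* align must be a power of two */
	}

	int main(void)
	{
		unsigned long vmax = 1UL << 47;	/* top of the usable virtual space */
		unsigned long modules_end, modules_vaddr, vmalloc_end, vmalloc_start;
		unsigned long memcpy_real_area, abs_lowcore, vsize;

		/* modules stay at the very top; the kernel will later go next to them */
		modules_end = round_down_to(vmax, SEGMENT_SIZE);
		modules_vaddr = modules_end - MODULES_LEN;
		vmalloc_end = modules_vaddr;

		/* reserve room for the two moved areas before halving for vmalloc */
		vsize = (vmalloc_end - (MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE)) / 2;
		vsize = round_down_to(vsize, SEGMENT_SIZE);
		vmalloc_start = vmalloc_end - vsize;

		/* the moved areas now sit directly below vmalloc */
		memcpy_real_area = vmalloc_start - MEMCPY_REAL_SIZE;
		abs_lowcore = memcpy_real_area - ABS_LOWCORE_MAP_SIZE;

		printf("modules:     %#018lx - %#018lx\n", modules_vaddr, modules_end);
		printf("vmalloc:     %#018lx - %#018lx\n", vmalloc_start, vmalloc_end);
		printf("memcpy real: %#018lx - %#018lx\n", memcpy_real_area, vmalloc_start);
		printf("abs lowcore: %#018lx - %#018lx\n", abs_lowcore, memcpy_real_area);
		return 0;
	}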

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Alexander Gordeev committed Apr 17, 2024
1 parent ecf74da commit c8aef26
Showing 2 changed files with 13 additions and 10 deletions.
20 changes: 11 additions & 9 deletions arch/s390/boot/startup.c
@@ -297,28 +297,30 @@ static unsigned long setup_kernel_memory_layout(void)
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
-	__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
-	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
-				   sizeof(struct lowcore));
-	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
+	MODULES_END = round_down(vmax, _SEGMENT_SIZE);
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
 
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
-	vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
+	vsize = (VMALLOC_END - (MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE)) / 2;
+	vsize = round_down(vsize, _SEGMENT_SIZE);
 	vmalloc_size = min(vmalloc_size, vsize);
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
 
+	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
+				   sizeof(struct lowcore));
+
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
-	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
 	pages = SECTION_ALIGN_UP(pages);
 	/* keep vmemmap_start aligned to a top level region table entry */
-	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
+	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
 	/* make sure identity map doesn't overlay with vmemmap */
 	ident_map_size = min(ident_map_size, vmemmap_start);
 	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
-	/* make sure vmemmap doesn't overlay with vmalloc area */
-	if (vmemmap_start + vmemmap_size > VMALLOC_START) {
+	/* make sure vmemmap doesn't overlay with absolute lowcore area */
+	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
 		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
 		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
 	}
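
For illustration, why the split above uses
pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page)): every
identity-mapped page needs one struct page entry in the vmemmap array,
and both must fit below __abs_lowcore, so one solves
pages * PAGE_SIZE + pages * sizeof(struct page) <= __abs_lowcore for
pages. A toy check, with made-up stand-in values for PAGE_SIZE,
sizeof(struct page) and __abs_lowcore:

	#include <assert.h>

	int main(void)
	{
		const unsigned long page_sz = 4096;		/* stand-in for PAGE_SIZE */
		const unsigned long struct_page_sz = 64;	/* stand-in for sizeof(struct page) */
		const unsigned long abs_lowcore = 1UL << 40;	/* stand-in for __abs_lowcore */

		/* solve pages * page_sz + pages * struct_page_sz <= abs_lowcore */
		unsigned long pages = abs_lowcore / (page_sz + struct_page_sz);

		/* the identity map plus its vmemmap array fit below abs_lowcore */
		assert(pages * page_sz + pages * struct_page_sz <= abs_lowcore);
		return 0;
	}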
3 changes: 2 additions & 1 deletion arch/s390/mm/vmem.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <asm/page-states.h>
+#include <asm/abs_lowcore.h>
 #include <asm/cacheflush.h>
 #include <asm/nospec-branch.h>
 #include <asm/ctlreg.h>
@@ -436,7 +437,7 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
 		return -EINVAL;
 	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
-	if (WARN_ON_ONCE(end > VMALLOC_START))
+	if (WARN_ON_ONCE(end > __abs_lowcore))
 		return -EINVAL;
 	for (addr = start; addr < end; addr = next) {
 		next = pgd_addr_end(addr, end);
