From d7bebcb4a898bd214bbd71107f975b7b9f0bde32 Mon Sep 17 00:00:00 2001
From: Vasily Gorbik
Date: Wed, 11 Dec 2024 12:29:40 +0100
Subject: [PATCH] s390: Optimize __pa/__va when RANDOMIZE_IDENTITY_BASE is off

Use a zero identity base when CONFIG_RANDOMIZE_IDENTITY_BASE is off,
slightly optimizing __pa/__va calculations.

Signed-off-by: Vasily Gorbik
Acked-by: Alexander Gordeev
Signed-off-by: Alexander Gordeev
---
 arch/s390/boot/startup.c     | 5 +++--
 arch/s390/include/asm/page.h | 4 ++++
 arch/s390/kernel/setup.c     | 2 +-
 3 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 807594cf246c1..58e56aed5f7de 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -372,8 +372,9 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
 	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
 	max_mappable = min(max_mappable, vmemmap_start);
-	if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
-		__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
+	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+#endif
 	boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
 		   __identity_base + ident_map_size);
 
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 4f43cdd9835b6..1ff145f7b52b1 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -184,7 +184,11 @@ extern struct vm_layout vm_layout;
 
 #define __kaslr_offset		vm_layout.kaslr_offset
 #define __kaslr_offset_phys	vm_layout.kaslr_offset_phys
+#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
 #define __identity_base		vm_layout.identity_base
+#else
+#define __identity_base		0UL
+#endif
 #define ident_map_size		vm_layout.identity_size
 
 static inline unsigned long kaslr_offset(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0fcb2a4e3f5ee..f6765ea4f8281 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -740,7 +740,7 @@ static void __init reserve_lowcore(void)
 	void *lowcore_end = lowcore_start + sizeof(struct lowcore);
 	void *start, *end;
 
-	if ((void *)__identity_base < lowcore_end) {
+	if (absolute_pointer(__identity_base) < lowcore_end) {
 		start = max(lowcore_start, (void *)__identity_base);
 		end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
 		memblock_reserve(__pa(start), __pa(end));
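
For context, here is a minimal userspace sketch of why a constant-zero identity base helps. It is not the kernel's actual page.h; the identifiers and exact macro shapes are assumptions. The idea it models is that __pa/__va are essentially an offset by __identity_base, so when the base is the literal 0UL the compiler can fold the arithmetic away instead of loading the base from vm_layout at runtime.

/*
 * Standalone sketch (illustrative names, not kernel code): models
 * __pa/__va as an add/subtract of the identity base.
 */
#include <stdio.h>

#ifdef RANDOMIZE_IDENTITY_BASE
static unsigned long identity_base = 0x1000000000UL;	/* runtime value, loaded from memory */
#else
#define identity_base 0UL				/* compile-time constant: the +/- folds away */
#endif

static inline unsigned long pa(const void *virt)
{
	return (unsigned long)virt - identity_base;
}

static inline void *va(unsigned long phys)
{
	return (void *)(phys + identity_base);
}

int main(void)
{
	unsigned long phys = 0x12345000UL;

	/* With identity_base fixed at 0UL the round trip compiles to a no-op. */
	printf("pa(va(%#lx)) = %#lx\n", phys, pa(va(phys)));
	return 0;
}

Built with and without -DRANDOMIZE_IDENTITY_BASE, the constant-base variant needs no memory load for the base, which is roughly what #define __identity_base 0UL buys the real __pa/__va when the randomization option is off.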