ia64: forbid using VIRTUAL_MEM_MAP with FLATMEM
Virtual memory map was intended to avoid wasting memory on the memory map
on systems with large holes in the physical memory layout. Long ago it was
superseded first by DISCONTIGMEM and then by SPARSEMEM. Moreover,
SPARSEMEM_VMEMMAP provides the same functionality in a much more portable
way.

As the first step toward removing VIRTUAL_MEM_MAP, forbid its use with
FLATMEM and panic on systems with large holes in the physical memory
layout that try to run FLATMEM kernels.

Link: https://lkml.kernel.org/r/20201101170454.9567-7-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Meelis Roos <mroos@linux.ee>
Cc: Michael Schmitz <schmitzmic@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Mike Rapoport authored and Linus Torvalds committed Dec 15, 2020
1 parent 1f11212 commit ea34f78
Showing 4 changed files with 22 additions and 44 deletions.
2 changes: 1 addition & 1 deletion arch/ia64/Kconfig
@@ -329,7 +329,7 @@ config NODES_SHIFT
 # VIRTUAL_MEM_MAP has been retained for historical reasons.
 config VIRTUAL_MEM_MAP
 	bool "Virtual mem map"
-	depends on !SPARSEMEM
+	depends on !SPARSEMEM && !FLATMEM
 	default y
 	help
 	  Say Y to compile the kernel with support for a virtual mem map.
2 changes: 0 additions & 2 deletions arch/ia64/include/asm/meminit.h
@@ -59,10 +59,8 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end);
 extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
 extern unsigned long VMALLOC_END;
 extern struct page *vmem_map;
-extern int find_largest_hole(u64 start, u64 end, void *arg);
 extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
 extern int vmemmap_find_next_valid_pfn(int, int);
 #else
48 changes: 21 additions & 27 deletions arch/ia64/mm/contig.c
@@ -19,15 +19,12 @@
 #include <linux/mm.h>
 #include <linux/nmi.h>
 #include <linux/swap.h>
+#include <linux/sizes.h>
 
 #include <asm/meminit.h>
 #include <asm/sections.h>
 #include <asm/mca.h>
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long max_gap;
-#endif
-
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
@@ -166,33 +163,30 @@ find_memory (void)
 	alloc_per_cpu_data();
 }
 
-static void __init virtual_map_init(void)
+static int __init find_largest_hole(u64 start, u64 end, void *arg)
 {
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
-	if (max_gap < LARGE_GAP) {
-		vmem_map = (struct page *) 0;
-	} else {
-		unsigned long map_size;
+	u64 *max_gap = arg;
 
-		/* allocate virtual_mem_map */
+	static u64 last_end = PAGE_OFFSET;
 
-		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
-			sizeof(struct page));
-		VMALLOC_END -= map_size;
-		vmem_map = (struct page *) VMALLOC_END;
-		efi_memmap_walk(create_mem_map_page_table, NULL);
+	/* NOTE: this algorithm assumes efi memmap table is ordered */
 
-		/*
-		 * alloc_node_mem_map makes an adjustment for mem_map
-		 * which isn't compatible with vmem_map.
-		 */
-		NODE_DATA(0)->node_mem_map = vmem_map +
-			find_min_pfn_with_active_regions();
+	if (*max_gap < (start - last_end))
+		*max_gap = start - last_end;
+	last_end = end;
+	return 0;
+}
 
-		printk("Virtual mem_map starts at 0x%p\n", mem_map);
-	}
-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
+static void __init verify_gap_absence(void)
+{
+	unsigned long max_gap;
+
+	/* Forbid FLATMEM if hole is > than 1G */
+	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+	if (max_gap >= SZ_1G)
+		panic("Cannot use FLATMEM with %ldMB hole\n"
+		      "Please switch over to SPARSEMEM\n",
+		      (max_gap >> 20));
 }
 
 /*
@@ -210,7 +204,7 @@ paging_init (void)
 	max_zone_pfns[ZONE_DMA32] = max_dma;
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
-	virtual_map_init();
+	verify_gap_absence();
 
 	free_area_init(max_zone_pfns);
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
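
For context, the new verify_gap_absence() boils down to a single ordered walk over the memory ranges, tracking the largest gap between the end of one range and the start of the next, and refusing to boot a FLATMEM kernel if that gap reaches 1 GiB. Below is a minimal userspace sketch of that logic; the sample memory layout, the stdio reporting, and the exit() in place of panic() are illustrative assumptions, not kernel code.

/*
 * Standalone sketch of the gap check added to arch/ia64/mm/contig.c.
 * An ordered array of (start, end) ranges stands in for the EFI memmap,
 * and exit() stands in for panic(); SZ_1G mirrors <linux/sizes.h>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define SZ_1G	(1ULL << 30)

struct range {
	uint64_t start;
	uint64_t end;
};

/* Largest hole between consecutive ranges; assumes the table is ordered. */
static uint64_t find_largest_hole(const struct range *map, size_t n)
{
	uint64_t max_gap = 0, last_end = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (map[i].start - last_end > max_gap)
			max_gap = map[i].start - last_end;
		last_end = map[i].end;
	}
	return max_gap;
}

static void verify_gap_absence(const struct range *map, size_t n)
{
	uint64_t max_gap = find_largest_hole(map, n);

	if (max_gap >= SZ_1G) {
		/* The kernel panics here; the sketch just reports and exits. */
		fprintf(stderr, "Cannot use FLATMEM with %lluMB hole\n",
			(unsigned long long)(max_gap >> 20));
		exit(1);
	}
}

int main(void)
{
	/* Hypothetical layout: 2 GiB of RAM, a 4 GiB hole, then 1 GiB more. */
	struct range map[] = {
		{ 0x000000000ULL, 0x080000000ULL },	/* 0 .. 2 GiB */
		{ 0x180000000ULL, 0x1c0000000ULL },	/* 6 .. 7 GiB */
	};

	verify_gap_absence(map, sizeof(map) / sizeof(map[0]));
	printf("no hole >= 1 GiB, FLATMEM would be acceptable\n");
	return 0;
}

With the sample layout above the check trips and prints "Cannot use FLATMEM with 4096MB hole"; shrink the hole below 1 GiB and the program instead reports that FLATMEM would be acceptable. The kernel version differs only in plumbing: find_largest_hole() runs as an efi_memmap_walk() callback, last_end starts at PAGE_OFFSET, and the failure path is panic() with a hint to switch to SPARSEMEM.
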
14 changes: 0 additions & 14 deletions arch/ia64/mm/init.c
@@ -574,20 +574,6 @@ ia64_pfn_valid (unsigned long pfn)
 }
 EXPORT_SYMBOL(ia64_pfn_valid);
 
-int __init find_largest_hole(u64 start, u64 end, void *arg)
-{
-	u64 *max_gap = arg;
-
-	static u64 last_end = PAGE_OFFSET;
-
-	/* NOTE: this algorithm assumes efi memmap table is ordered */
-
-	if (*max_gap < (start - last_end))
-		*max_gap = start - last_end;
-	last_end = end;
-	return 0;
-}
-
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 int __init register_active_ranges(u64 start, u64 len, int nid)
