mm/hugetlb: enable bootmem allocation from CMA areas
If hugetlb_cma_only is enabled, we know that hugetlb pages can only be
allocated from CMA.  Now that there is an interface to do early
reservations from a CMA area (cma_reserve_early(), returning memblock
memory), it can be used to allocate hugetlb pages from CMA.

This also allows for doing pre-HVO on these pages (if enabled).

Make sure to initialize the page structures and associated data
(migratetype, managed page accounting) correctly for these pages.  Add a
hugetlb page flag (HPG_cma) to record that a page was allocated from CMA,
which makes this bookkeeping a little easier.

Some configurations of powerpc have a special hugetlb bootmem allocator,
so introduce a boolean arch_has_huge_bootmem_alloc() hook that returns
true if such an allocator is present.  In that case, CMA bootmem
allocations can't be used, so check that hook before trying.
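
As a quick orientation before the diff: the pieces added below fit together
roughly as in the following condensed sketch.  The names
(arch_has_huge_bootmem_alloc, hugetlb_early_cma, cma_reserve_early,
HUGE_BOOTMEM_CMA, HPG_cma) come from the patch itself, but the bodies are
simplified paraphrases of the hunks that follow, not standalone code:

	/* Generic default: no arch-specific bootmem allocator, so early CMA may be used. */
	#ifndef arch_has_huge_bootmem_alloc
	static inline bool arch_has_huge_bootmem_alloc(void)
	{
		return false;
	}
	#endif

	/* Early CMA reservations apply only to gigantic pages with hugetlb_cma_only set. */
	static bool __init hugetlb_early_cma(struct hstate *h)
	{
		if (arch_has_huge_bootmem_alloc())
			return false;	/* the arch allocator takes precedence */

		return hstate_is_gigantic(h) && hugetlb_cma_only;
	}

alloc_bootmem() then picks the backing store accordingly: cma_reserve_early()
on the requested node (falling back to other nodes' CMA areas) when early CMA
applies, or the existing memblock_alloc_*_nid_raw() calls otherwise.  Pages
reserved from CMA are marked HUGE_BOOTMEM_CMA and later HPG_cma, so that
freeing, migratetype initialization, and managed-page accounting treat them
as CMA pages.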

Link: https://lkml.kernel.org/r/20250228182928.2645936-27-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Frank van der Linden authored and Andrew Morton committed Mar 17, 2025
1 parent f866cfc commit d2d7867
Showing 3 changed files with 152 additions and 39 deletions.
6 changes: 6 additions & 0 deletions arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -94,4 +94,10 @@ static inline int check_and_get_huge_psize(int shift)
return mmu_psize;
}

#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc

static inline bool arch_has_huge_bootmem_alloc(void)
{
return (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled());
}
#endif
17 changes: 17 additions & 0 deletions include/linux/hugetlb.h
Expand Up @@ -591,6 +591,7 @@ enum hugetlb_page_flags {
HPG_freed,
HPG_vmemmap_optimized,
HPG_raw_hwp_unreliable,
HPG_cma,
__NR_HPAGEFLAGS,
};

@@ -650,6 +651,7 @@ HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
HPAGEFLAG(Cma, cma)

#ifdef CONFIG_HUGETLB_PAGE

@@ -678,14 +680,18 @@ struct hstate {
char name[HSTATE_NAME_LEN];
};

struct cma;

struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
unsigned long flags;
struct cma *cma;
};

#define HUGE_BOOTMEM_HVO 0x0001
#define HUGE_BOOTMEM_ZONES_VALID 0x0002
#define HUGE_BOOTMEM_CMA 0x0004

bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);

@@ -824,6 +830,17 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
}
#endif

#ifndef arch_has_huge_bootmem_alloc
/*
* Some architectures do their own bootmem allocation, so they can't use
* early CMA allocation.
*/
static inline bool arch_has_huge_bootmem_alloc(void)
{
return false;
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
168 changes: 129 additions & 39 deletions mm/hugetlb.c
Expand Up @@ -131,8 +131,10 @@ static void hugetlb_free_folio(struct folio *folio)
#ifdef CONFIG_CMA
int nid = folio_nid(folio);

if (cma_free_folio(hugetlb_cma[nid], folio))
if (folio_test_hugetlb_cma(folio)) {
WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
return;
}
#endif
folio_put(folio);
}
@@ -1508,6 +1510,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
break;
}
}

if (folio)
folio_set_hugetlb_cma(folio);
}
#endif
if (!folio) {
@@ -3186,6 +3191,86 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
return ERR_PTR(-ENOSPC);
}

static bool __init hugetlb_early_cma(struct hstate *h)
{
if (arch_has_huge_bootmem_alloc())
return false;

return (hstate_is_gigantic(h) && hugetlb_cma_only);
}

static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
{
struct huge_bootmem_page *m;
unsigned long flags;
struct cma *cma;
int listnode = nid;

#ifdef CONFIG_CMA
if (hugetlb_early_cma(h)) {
flags = HUGE_BOOTMEM_CMA;
cma = hugetlb_cma[nid];
m = cma_reserve_early(cma, huge_page_size(h));
if (!m) {
int node;

if (node_exact)
return NULL;
for_each_online_node(node) {
cma = hugetlb_cma[node];
if (!cma || node == nid)
continue;
m = cma_reserve_early(cma, huge_page_size(h));
if (m) {
listnode = node;
break;
}
}
}
} else
#endif
{
flags = 0;
cma = NULL;
if (node_exact)
m = memblock_alloc_exact_nid_raw(huge_page_size(h),
huge_page_size(h), 0,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
else {
m = memblock_alloc_try_nid_raw(huge_page_size(h),
huge_page_size(h), 0,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
/*
* For pre-HVO to work correctly, pages need to be on
* the list for the node they were actually allocated
* from. That node may be different in the case of
* fallback by memblock_alloc_try_nid_raw. So,
* extract the actual node first.
*/
if (m)
listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
}
}

if (m) {
/*
* Use the beginning of the huge page to store the
* huge_bootmem_page struct (until gather_bootmem
* puts them into the mem_map).
*
* Put them into a private list first because mem_map
* is not up yet.
*/
INIT_LIST_HEAD(&m->list);
list_add(&m->list, &huge_boot_pages[listnode]);
m->hstate = h;
m->flags = flags;
m->cma = cma;
}

return m;
}

int alloc_bootmem_huge_page(struct hstate *h, int nid)
__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
@@ -3195,22 +3280,15 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)

/* do node specific alloc */
if (nid != NUMA_NO_NODE) {
m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h),
0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
m = alloc_bootmem(h, node, true);
if (!m)
return 0;
goto found;
}

/* allocate from next node when distributing huge pages */
for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_ONLINE]) {
m = memblock_alloc_try_nid_raw(
huge_page_size(h), huge_page_size(h),
0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
/*
* Use the beginning of the huge page to store the
* huge_bootmem_page struct (until gather_bootmem
* puts them into the mem_map).
*/
m = alloc_bootmem(h, node, false);
if (!m)
return 0;
goto found;
@@ -3228,21 +3306,6 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
huge_page_size(h) - PAGE_SIZE);

/*
* Put them into a private list first because mem_map is not up yet.
*
* For pre-HVO to work correctly, pages need to be on the list for
* the node they were actually allocated from. That node may be
* different in the case of fallback by memblock_alloc_try_nid_raw.
* So, extract the actual node first.
*/
if (nid == NUMA_NO_NODE)
node = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));

INIT_LIST_HEAD(&m->list);
list_add(&m->list, &huge_boot_pages[node]);
m->hstate = h;
m->flags = 0;
return 1;
}

@@ -3283,13 +3346,25 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
prep_compound_head((struct page *)folio, huge_page_order(h));
}

static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
{
return m->flags & HUGE_BOOTMEM_HVO;
}

static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
{
return m->flags & HUGE_BOOTMEM_CMA;
}

/*
* memblock-allocated pageblocks might not have the migrate type set
* if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
* here.
* here, or MIGRATE_CMA if this was a page allocated through an early CMA
* reservation.
*
* Note that this will not write the page struct, it is ok (and necessary)
* to do this on vmemmap optimized folios.
* In case of vmemmap optimized folios, the tail vmemmap pages are mapped
* read-only, but that's ok - for sparse vmemmap this does not write to
* the page structure.
*/
static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
struct hstate *h)
@@ -3298,9 +3373,13 @@ static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,

WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));

for (i = 0; i < nr_pages; i += pageblock_nr_pages)
set_pageblock_migratetype(folio_page(folio, i),
for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
if (folio_test_hugetlb_cma(folio))
init_cma_pageblock(folio_page(folio, i));
else
set_pageblock_migratetype(folio_page(folio, i),
MIGRATE_MOVABLE);
}
}

static void __init prep_and_add_bootmem_folios(struct hstate *h,
@@ -3346,10 +3425,16 @@ bool __init hugetlb_bootmem_page_zones_valid(int nid,
return true;
}

if (hugetlb_bootmem_page_earlycma(m)) {
valid = cma_validate_zones(m->cma);
goto out;
}

start_pfn = virt_to_phys(m) >> PAGE_SHIFT;

valid = !pfn_range_intersects_zones(nid, start_pfn,
pages_per_huge_page(m->hstate));
out:
if (!valid)
hstate_boot_nrinvalid[hstate_index(m->hstate)]++;

@@ -3378,11 +3463,6 @@ static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
}
}

static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
{
return (m->flags & HUGE_BOOTMEM_HVO);
}

/*
* Put bootmem huge pages into the standard lists after mem_map is up.
* Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
@@ -3432,14 +3512,21 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid)
*/
folio_set_hugetlb_vmemmap_optimized(folio);

if (hugetlb_bootmem_page_earlycma(m))
folio_set_hugetlb_cma(folio);

list_add(&folio->lru, &folio_list);

/*
* We need to restore the 'stolen' pages to totalram_pages
* in order to fix confusing memory reports from free(1) and
* other side-effects, like CommitLimit going negative.
*
* For CMA pages, this is done in init_cma_pageblock
* (via hugetlb_bootmem_init_migratetype), so skip it here.
*/
adjust_managed_page_count(page, pages_per_huge_page(h));
if (!folio_test_hugetlb_cma(folio))
adjust_managed_page_count(page, pages_per_huge_page(h));
cond_resched();
}

@@ -3624,8 +3711,11 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
unsigned long allocated;

/* skip gigantic hugepages allocation if hugetlb_cma enabled */
if (hstate_is_gigantic(h) && hugetlb_cma_size) {
/*
* Skip gigantic hugepages allocation if early CMA
* reservations are not available.
*/
if (hstate_is_gigantic(h) && hugetlb_cma_size && !hugetlb_early_cma(h)) {
pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
return;
}
