mm/cma: introduce a cma validate function
Define a function to check whether a CMA area is valid, meaning that none
of its ranges cross a zone boundary.  Store the result in the newly
created flags for each CMA area, so that repeated calls return the cached
result.

This allows the validity of a CMA area to be checked early, which a later
patch needs in order to allocate hugetlb bootmem pages from it with
pre-HVO.
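
As an illustration (not part of this patch), a caller that needs a
zone-contiguous area, such as the later hugetlb pre-HVO bootmem path,
could then bail out early along these lines:

	/* Hypothetical caller sketch; only cma_validate_zones() is from this patch. */
	if (!cma_validate_zones(cma))
		return -EINVAL;	/* some range crosses a zone boundary */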

Link: https://lkml.kernel.org/r/20250228182928.2645936-24-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Frank van der Linden authored and Andrew Morton committed Mar 17, 2025
1 parent b51d3db commit 9320fa2
Showing 3 changed files with 60 additions and 13 deletions.
include/linux/cma.h (5 additions, 0 deletions)

--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -60,6 +60,7 @@ extern void cma_reserve_pages_on_error(struct cma *cma);
 #ifdef CONFIG_CMA
 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
 bool cma_free_folio(struct cma *cma, const struct folio *folio);
+bool cma_validate_zones(struct cma *cma);
 #else
 static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
 {
@@ -70,6 +71,10 @@ static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
 {
 	return false;
 }
+static inline bool cma_validate_zones(struct cma *cma)
+{
+	return false;
+}
 #endif
 
 #endif
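
Because the !CONFIG_CMA case provides a static inline stub returning
false, callers need no #ifdef of their own. A minimal sketch (use_area()
and the surrounding code are hypothetical):

	if (cma && cma_validate_zones(cma))
		use_area(cma);	/* hypothetical; with CONFIG_CMA=n this compiles to dead code */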
mm/cma.c (48 additions, 12 deletions)

--- a/mm/cma.c
+++ b/mm/cma.c
@@ -99,6 +99,49 @@ static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
 	spin_unlock_irqrestore(&cma->lock, flags);
 }
 
+/*
+ * Check if a CMA area contains no ranges that intersect with
+ * multiple zones. Store the result in the flags in case
+ * this gets called more than once.
+ */
+bool cma_validate_zones(struct cma *cma)
+{
+	int r;
+	unsigned long base_pfn;
+	struct cma_memrange *cmr;
+	bool valid_bit_set;
+
+	/*
+	 * If already validated, return result of previous check.
+	 * Either the valid or invalid bit will be set if this
+	 * check has already been done. If neither is set, the
+	 * check has not been performed yet.
+	 */
+	valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
+	if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
+		return valid_bit_set;
+
+	for (r = 0; r < cma->nranges; r++) {
+		cmr = &cma->ranges[r];
+		base_pfn = cmr->base_pfn;
+
+		/*
+		 * alloc_contig_range() requires the pfn range specified
+		 * to be in the same zone. Simplify by forcing the entire
+		 * CMA resv range to be in the same zone.
+		 */
+		WARN_ON_ONCE(!pfn_valid(base_pfn));
+		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
+			set_bit(CMA_ZONES_INVALID, &cma->flags);
+			return false;
+		}
+	}
+
+	set_bit(CMA_ZONES_VALID, &cma->flags);
+
+	return true;
+}
+
 static void __init cma_activate_area(struct cma *cma)
 {
 	unsigned long pfn, base_pfn;
@@ -113,19 +156,12 @@ static void __init cma_activate_area(struct cma *cma)
 			goto cleanup;
 	}
 
+	if (!cma_validate_zones(cma))
+		goto cleanup;
+
 	for (r = 0; r < cma->nranges; r++) {
 		cmr = &cma->ranges[r];
 		base_pfn = cmr->base_pfn;
-
-		/*
-		 * alloc_contig_range() requires the pfn range specified
-		 * to be in the same zone. Simplify by forcing the entire
-		 * CMA resv range to be in the same zone.
-		 */
-		WARN_ON_ONCE(!pfn_valid(base_pfn));
-		if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count))
-			goto cleanup;
-
 		for (pfn = base_pfn; pfn < base_pfn + cmr->count;
 		     pfn += pageblock_nr_pages)
 			init_cma_reserved_pageblock(pfn_to_page(pfn));
@@ -145,7 +181,7 @@ static void __init cma_activate_area(struct cma *cma)
 		bitmap_free(cma->ranges[r].bitmap);
 
 	/* Expose all pages to the buddy, they are useless for CMA. */
-	if (!cma->reserve_pages_on_error) {
+	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
 		for (r = 0; r < allocrange; r++) {
 			cmr = &cma->ranges[r];
 			for (pfn = cmr->base_pfn;
@@ -172,7 +208,7 @@ core_initcall(cma_init_reserved_areas);
 
 void __init cma_reserve_pages_on_error(struct cma *cma)
 {
-	cma->reserve_pages_on_error = true;
+	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
 }
 
 static int __init cma_new_area(const char *name, phys_addr_t size,
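
The CMA_ZONES_VALID/CMA_ZONES_INVALID pair gives three states on one
word: neither bit set (not yet checked), valid, or invalid, so the zone
scan runs at most once per area. A standalone sketch of the same
memoization pattern in plain C (all names hypothetical; the kernel code
uses test_bit()/set_bit() rather than open-coded masks):

	#include <stdbool.h>

	enum { F_VALID, F_INVALID };		/* bit numbers, like enum cma_flags */

	static bool expensive_check(void)	/* stand-in for the per-range zone scan */
	{
		return true;
	}

	static bool validate_once(unsigned long *flags)
	{
		/* Either bit set means the check already ran; return the cached verdict. */
		if (*flags & (1UL << F_VALID))
			return true;
		if (*flags & (1UL << F_INVALID))
			return false;

		if (!expensive_check()) {
			*flags |= 1UL << F_INVALID;
			return false;
		}
		*flags |= 1UL << F_VALID;
		return true;
	}

	int main(void)
	{
		unsigned long flags = 0;

		validate_once(&flags);			/* runs the check, caches the result */
		return validate_once(&flags) ? 0 : 1;	/* served from the cached bits */
	}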
mm/cma.h (7 additions, 1 deletion)

--- a/mm/cma.h
+++ b/mm/cma.h
@@ -49,11 +49,17 @@ struct cma {
 	/* kobject requires dynamic object */
 	struct cma_kobject *cma_kobj;
 #endif
-	bool reserve_pages_on_error;
+	unsigned long flags;
 	/* NUMA node (NUMA_NO_NODE if unspecified) */
 	int nid;
 };
 
+enum cma_flags {
+	CMA_RESERVE_PAGES_ON_ERROR,
+	CMA_ZONES_VALID,
+	CMA_ZONES_INVALID,
+};
+
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned int cma_area_count;
 
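
Note that the enum cma_flags values are bit numbers, not masks: they are
passed directly to the kernel's set_bit()/test_bit() bitops, which take a
bit index and a pointer to the unsigned long word holding the flags,
mirroring the calls in the patch:

	set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);	/* from cma_reserve_pages_on_error() */

	if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags))
		release_pages_to_buddy();	/* hypothetical name for the cleanup loop */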
