diff --git a/mm/internal.h b/mm/internal.h
index 9c941af5bdb6..b831688a71e8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -740,6 +740,10 @@ extern bool free_pages_prepare(struct page *page, unsigned int order);
 
 extern int user_min_free_kbytes;
 
+struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
+		nodemask_t *);
+#define __alloc_frozen_pages(...) \
+	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
 void free_frozen_pages(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index df5b61592792..7a2853b7967d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4713,8 +4713,8 @@ EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
-		int preferred_nid, nodemask_t *nodemask)
+struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
+		int preferred_nid, nodemask_t *nodemask)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -4770,14 +4770,24 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 		free_frozen_pages(page, order);
 		page = NULL;
 	}
-	if (page)
-		set_page_refcounted(page);
 
 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 	kmsan_alloc_page(page, order, alloc_gfp);
 
 	return page;
 }
+EXPORT_SYMBOL(__alloc_frozen_pages_noprof);
+
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
+		int preferred_nid, nodemask_t *nodemask)
+{
+	struct page *page;
+
+	page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask);
+	if (page)
+		set_page_refcounted(page);
+	return page;
+}
 EXPORT_SYMBOL(__alloc_pages_noprof);
 
 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
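
With this split, __alloc_frozen_pages() returns pages with a zero refcount; callers must either free them with free_frozen_pages() or take a reference themselves, which is exactly what the new __alloc_pages_noprof() wrapper does via set_page_refcounted(). A minimal sketch of how a hypothetical mm-internal caller could use the new interface (example_get_page() and example_setup() are made-up names for illustration, not part of this patch):

static struct page *example_get_page(gfp_t gfp, unsigned int order, int nid)
{
	struct page *page;

	/* allocate a page whose refcount is still frozen at zero */
	page = __alloc_frozen_pages(gfp, order, nid, NULL);
	if (!page)
		return NULL;

	if (!example_setup(page)) {	/* assumed helper, illustration only */
		/* never refcounted, so hand it back frozen */
		free_frozen_pages(page, order);
		return NULL;
	}

	/* publish the page with refcount == 1 */
	set_page_refcounted(page);
	return page;
}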