mm/mempolicy: add alloc_frozen_pages()

Provide an interface to allocate pages from the page allocator without
incrementing their refcount.  This saves an atomic operation on free,
which may be beneficial to some users (eg slab).
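
As a rough illustration (not part of this patch; the wrapper names below are
hypothetical, only alloc_frozen_pages() and free_frozen_pages() come from
mm/internal.h), a caller inside mm/ could pair the two ends of the interface
like so:

    #include "internal.h"   /* mm-internal interface, not for drivers */

    /* Sketch: allocate an order-0 page whose refcount stays at zero. */
    static struct page *example_grab_frozen(gfp_t gfp)
    {
            return alloc_frozen_pages(gfp, 0);
    }

    /*
     * Sketch: hand the page straight back to the allocator, without the
     * atomic refcount decrement that put_page() would otherwise perform.
     */
    static void example_drop_frozen(struct page *page)
    {
            free_frozen_pages(page, 0);
    }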

Link: https://lkml.kernel.org/r/20241125210149.2976098-15-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Matthew Wilcox (Oracle) authored and Andrew Morton committed Jan 14, 2025
1 parent 49249a2 commit 6429752
Showing 2 changed files with 44 additions and 17 deletions.
mm/internal.h: 12 additions & 0 deletions
@@ -747,6 +747,18 @@ struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
 void free_frozen_pages(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
 
+#ifdef CONFIG_NUMA
+struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
+#else
+static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
+{
+        return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
+}
+#endif
+
+#define alloc_frozen_pages(...) \
+        alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))
+
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
 extern void zone_pcp_enable(struct zone *zone);

mm/mempolicy.c: 32 additions & 17 deletions
@@ -2205,9 +2205,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
          */
         preferred_gfp = gfp | __GFP_NOWARN;
         preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-        page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
+        page = __alloc_frozen_pages_noprof(preferred_gfp, order, nid, nodemask);
         if (!page)
-                page = __alloc_pages_noprof(gfp, order, nid, NULL);
+                page = __alloc_frozen_pages_noprof(gfp, order, nid, NULL);
 
         return page;
 }
@@ -2253,8 +2253,9 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
                          * First, try to allocate THP only on local node, but
                          * don't reclaim unnecessarily, just compact.
                          */
-                        page = __alloc_pages_node_noprof(nid,
-                                gfp | __GFP_THISNODE | __GFP_NORETRY, order);
+                        page = __alloc_frozen_pages_noprof(
+                                gfp | __GFP_THISNODE | __GFP_NORETRY, order,
+                                nid, NULL);
                         if (page || !(gfp & __GFP_DIRECT_RECLAIM))
                                 return page;
                         /*
@@ -2266,7 +2267,7 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
                 }
         }
 
-        page = __alloc_pages_noprof(gfp, order, nid, nodemask);
+        page = __alloc_frozen_pages_noprof(gfp, order, nid, nodemask);
 
         if (unlikely(pol->mode == MPOL_INTERLEAVE ||
                      pol->mode == MPOL_WEIGHTED_INTERLEAVE) && page) {
@@ -2285,8 +2286,13 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
                 struct mempolicy *pol, pgoff_t ilx, int nid)
 {
-        return page_rmappable_folio(alloc_pages_mpol(gfp | __GFP_COMP,
-                                                      order, pol, ilx, nid));
+        struct page *page = alloc_pages_mpol(gfp | __GFP_COMP, order, pol,
+                                             ilx, nid);
+        if (!page)
+                return NULL;
+
+        set_page_refcounted(page);
+        return page_rmappable_folio(page);
 }
 
 /**
@@ -2321,6 +2327,21 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct
 }
 EXPORT_SYMBOL(vma_alloc_folio_noprof);
 
+struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned order)
+{
+        struct mempolicy *pol = &default_policy;
+
+        /*
+         * No reference counting needed for current->mempolicy
+         * nor system default_policy
+         */
+        if (!in_interrupt() && !(gfp & __GFP_THISNODE))
+                pol = get_task_policy(current);
+
+        return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX,
+                        numa_node_id());
+}
+
 /**
  * alloc_pages - Allocate pages.
  * @gfp: GFP flags.
@@ -2337,17 +2358,11 @@ EXPORT_SYMBOL(vma_alloc_folio_noprof);
  */
 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
 {
-        struct mempolicy *pol = &default_policy;
-
-        /*
-         * No reference counting needed for current->mempolicy
-         * nor system default_policy
-         */
-        if (!in_interrupt() && !(gfp & __GFP_THISNODE))
-                pol = get_task_policy(current);
+        struct page *page = alloc_frozen_pages_noprof(gfp, order);
 
-        return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX,
-                        numa_node_id());
+        if (page)
+                set_page_refcounted(page);
+        return page;
 }
 EXPORT_SYMBOL(alloc_pages_noprof);
 