mm/slub: Convert alloc_slab_page() to return a struct slab
Preparatory change; callers convert the returned struct slab back to a struct page for now.

Also move the setting of the page flags into alloc_slab_page(), where we still operate on a struct page. This means the page->slab_cache pointer is now set later than the PageSlab flag, which could in theory confuse a pfn walker that assumes PageSlab implies a valid cache pointer. But as the code had no barriers and used the non-atomic __set_bit() anyway, such a reordering could already have happened, so there shouldn't be such a walker.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
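
To make the pfn-walker concern above concrete: such a walker would have to look roughly like the sketch below. This is hypothetical illustration code, not part of this commit; it tests PageSlab() and then trusts page->slab_cache without any ordering guarantee, which neither the old nor the new code ever provided.

/*
 * Hypothetical pfn walker (illustration only): it assumes a set PG_slab
 * bit implies page->slab_cache already points to a valid kmem_cache.
 * The old code had no barrier between __SetPageSlab() and the pointer
 * store either, so moving the flag earlier breaks no existing guarantee.
 */
static void walk_pfn_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageSlab(page))
			pr_info("slab page of cache %s\n",
				page->slab_cache->name);	/* racy assumption */
	}
}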
Vlastimil Babka committed Jan 6, 2022
1 parent fb012e2 commit 45387b8
Showing 1 changed file with 16 additions and 10 deletions.
mm/slub.c
@@ -1788,18 +1788,27 @@ static void *setup_object(struct kmem_cache *s, struct page *page,
 /*
  * Slab allocation and freeing
  */
-static inline struct page *alloc_slab_page(struct kmem_cache *s,
+static inline struct slab *alloc_slab_page(struct kmem_cache *s,
 		gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
-		page = alloc_pages(flags, order);
+		folio = (struct folio *)alloc_pages(flags, order);
 	else
-		page = __alloc_pages_node(node, flags, order);
+		folio = (struct folio *)__alloc_pages_node(node, flags, order);
 
-	return page;
+	if (!folio)
+		return NULL;
+
+	slab = folio_slab(folio);
+	__folio_set_slab(folio);
+	if (page_is_pfmemalloc(folio_page(folio, 0)))
+		slab_set_pfmemalloc(slab);
+
+	return slab;
 }
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
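
The casts in this hunk are cheap because struct slab, struct folio and struct page all describe the same underlying memory: the conversion helpers are type-checked reinterpretations, not copies or new allocations. A simplified sketch of what the helpers used here amount to (the real definitions, in mm/slab.h at this point in the series, use _Generic() to preserve constness):

/*
 * Simplified sketch of the slab/folio conversion helpers used above;
 * the real versions are _Generic()-based macros, but the effect is a
 * cast of the same memory.
 */
#define folio_slab(folio)	((struct slab *)(folio))
#define slab_folio(slab)	((struct folio *)(slab))
#define slab_page(slab)		folio_page(slab_folio(slab), 0)

This is why alloc_slab_page() can cast the page allocator's return value to struct folio * directly and hand back a struct slab without touching the page contents.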
@@ -1932,15 +1941,15 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	page = alloc_slab_page(s, alloc_gfp, node, oo);
+	page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
 	if (unlikely(!page)) {
 		oo = s->min;
 		alloc_gfp = flags;
 		/*
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		page = alloc_slab_page(s, alloc_gfp, node, oo);
+		page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
 		if (unlikely(!page))
 			goto out;
 		stat(s, ORDER_FALLBACK);
@@ -1951,9 +1960,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	account_slab(page_slab(page), oo_order(oo), s, flags);
 
 	page->slab_cache = s;
-	__SetPageSlab(page);
-	if (page_is_pfmemalloc(page))
-		SetPageSlabPfmemalloc(page);
 
 	kasan_poison_slab(page);
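
The barrier argument from the commit message is visible in the flag helpers themselves: __folio_set_slab(), like the __SetPageSlab() it replaces, is a non-atomic __set_bit() on the head page's flags word, with no memory barrier ordering it against the later page->slab_cache store. A rough sketch of what the generated helpers boil down to (simplified from the page-flags macro machinery; the real ones also handle compound-page policy):

/*
 * Rough sketch of the non-atomic flag setters involved here; both are a
 * plain __set_bit() with no barrier, so ordering against the
 * page->slab_cache assignment was never guaranteed in either version.
 */
static __always_inline void __folio_set_slab(struct folio *folio)
{
	__set_bit(PG_slab, folio_flags(folio, 0));
}

static __always_inline void __SetPageSlab(struct page *page)
{
	__set_bit(PG_slab, &page->flags);
}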
