mm/slub: Convert __free_slab() to use struct slab
__free_slab() is on the boundary of distinguishing struct slab and
struct page, so start with struct slab, but convert to folio for working
with flags and use folio_page() to call functions that require struct page.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Vlastimil Babka committed Jan 6, 2022
1 parent 45387b8 commit 4020b4a
Showing 1 changed file with 13 additions and 14 deletions.
27 changes: 13 additions & 14 deletions mm/slub.c
@@ -2005,43 +2005,42 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 }
 
-static void __free_slab(struct kmem_cache *s, struct page *page)
+static void __free_slab(struct kmem_cache *s, struct slab *slab)
 {
-        int order = compound_order(page);
+        struct folio *folio = slab_folio(slab);
+        int order = folio_order(folio);
         int pages = 1 << order;
 
         if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
                 void *p;
 
-                slab_pad_check(s, page);
-                for_each_object(p, s, page_address(page),
-                                                page->objects)
-                        check_object(s, page, p, SLUB_RED_INACTIVE);
+                slab_pad_check(s, folio_page(folio, 0));
+                for_each_object(p, s, slab_address(slab), slab->objects)
+                        check_object(s, folio_page(folio, 0), p, SLUB_RED_INACTIVE);
         }
 
-        __ClearPageSlabPfmemalloc(page);
-        __ClearPageSlab(page);
-        /* In union with page->mapping where page allocator expects NULL */
-        page->slab_cache = NULL;
+        __slab_clear_pfmemalloc(slab);
+        __folio_clear_slab(folio);
+        folio->mapping = NULL;
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += pages;
-        unaccount_slab(page_slab(page), order, s);
-        __free_pages(page, order);
+        unaccount_slab(slab, order, s);
+        __free_pages(folio_page(folio, 0), order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
 {
         struct page *page = container_of(h, struct page, rcu_head);
 
-        __free_slab(page->slab_cache, page);
+        __free_slab(page->slab_cache, page_slab(page));
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
         if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
                 call_rcu(&page->rcu_head, rcu_free_slab);
         } else
-                __free_slab(s, page);
+                __free_slab(s, page_slab(page));
 }
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
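For readers who have not followed the struct slab series, the pattern used in the diff above can be illustrated with a standalone sketch: struct slab and struct folio are separate C types that overlay the same underlying struct page, and small helpers (slab_folio(), folio_page(), page_slab()) convert between the views so each function can work with the most specific type it needs. The code below is a minimal userspace model only, not kernel code; every toy_* name (toy_page, toy_folio, toy_slab, toy_slab_folio, toy_folio_page, toy_page_slab, toy_page_only_api, toy_free_slab) is a hypothetical stand-in, and the real kernel helpers carry type checking that is omitted here.

#include <stdio.h>

/* Base descriptor: every view below aliases one of these. */
struct toy_page {
        unsigned long flags;
        void *slab_cache;       /* the kernel keeps this in a union with ->mapping */
        unsigned int objects;
};

/* Folio view: head page of a compound allocation. */
struct toy_folio { struct toy_page page; };

/* Slab view: allocator-specific interpretation of the same memory. */
struct toy_slab { struct toy_page page; };

/* Conversion helpers, loosely mirroring slab_folio()/folio_page()/page_slab(). */
static struct toy_folio *toy_slab_folio(struct toy_slab *slab)
{
        return (struct toy_folio *)slab;
}

static struct toy_page *toy_folio_page(struct toy_folio *folio, unsigned int n)
{
        return &folio[n].page;  /* only n == 0 is used in this sketch */
}

static struct toy_slab *toy_page_slab(struct toy_page *page)
{
        return (struct toy_slab *)page;
}

/* A page-based helper, standing in for functions that still want struct page. */
static void toy_page_only_api(struct toy_page *page)
{
        printf("flags=%lx objects=%u\n", page->flags, page->objects);
}

/*
 * The boundary function: takes the slab view, switches to the folio view for
 * flag work, and drops to struct page only where a page-based API demands it.
 */
static void toy_free_slab(struct toy_slab *slab)
{
        struct toy_folio *folio = toy_slab_folio(slab);

        folio->page.flags &= ~0x1UL;                    /* "clear the slab flag" */
        toy_page_only_api(toy_folio_page(folio, 0));    /* page-based call */
}

int main(void)
{
        struct toy_page page = { .flags = 0x3, .objects = 16 };

        /* Callers that still hold a struct page convert at the call site. */
        toy_free_slab(toy_page_slab(&page));
        return 0;
}

The shape of toy_free_slab() is the point of the conversion: flag manipulation goes through the folio view, page-only APIs get a struct page via folio_page(folio, 0), and callers that still hold a struct page, like rcu_free_slab() and free_slab() in the diff, convert with page_slab() at the call site.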
