Skip to content

Commit

Permalink
slab: Remove some accessors
Browse files Browse the repository at this point in the history
Those are rather trivial now and it's better to see inline what is
really going on.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
  • Loading branch information
Christoph Lameter authored and Pekka Enberg committed Jun 14, 2012
1 parent e571b0a commit 3502608
Showing 1 changed file with 8 additions and 27 deletions.
35 changes: 8 additions & 27 deletions mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -489,44 +489,25 @@ EXPORT_SYMBOL(slab_buffer_size);
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

/*
* Functions for storing/retrieving the cachep and or slab from the page
* allocator. These are used to find the slab an obj belongs to. With kfree(),
* these are used to find the cache which an obj belongs to.
*/
/*
 * Stash the owning kmem_cache in the page struct so that it can later
 * be recovered from an object pointer (see virt_to_cache()).
 */
static inline void page_set_cache(struct page *pg, struct kmem_cache *owner)
{
	pg->slab_cache = owner;
}

/*
 * Return the kmem_cache recorded in @page.
 *
 * The lookup always goes through the compound head page, since the
 * cache pointer is only stored there; a non-slab page is a bug.
 */
static inline struct kmem_cache *page_get_cache(struct page *page)
{
	struct page *head = compound_head(page);

	BUG_ON(!PageSlab(head));
	return head->slab_cache;
}

/*
 * Stash the slab management structure in the page struct so that it
 * can later be recovered from an object pointer (see virt_to_slab()).
 */
static inline void page_set_slab(struct page *pg, struct slab *mgmt)
{
	pg->slab_page = mgmt;
}

/*
 * Return the slab management structure recorded in @pg.
 * Asking this of a non-slab page is a bug.
 */
static inline struct slab *page_get_slab(struct page *pg)
{
	BUG_ON(!PageSlab(pg));

	return pg->slab_page;
}

/*
 * Find the kmem_cache an allocated object belongs to, used on the
 * kfree() path where only the object pointer is known.
 *
 * Fix: the body contained two consecutive return statements (a stale
 * diff artifact) — the second, `return page->slab_cache;`, was
 * unreachable dead code.  The accessor is inlined here: @page is
 * already the head page courtesy of virt_to_head_page(), so the
 * compound_head() step inside the old page_get_cache() was a no-op;
 * the PageSlab sanity check is preserved.
 */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	BUG_ON(!PageSlab(page));
	return page->slab_cache;
}

/*
 * Find the slab management structure for an allocated object, used to
 * locate the slab an object sits on when only its pointer is known.
 *
 * Fix: the body contained an early `return page_get_slab(page);`
 * followed by an unreachable VM_BUG_ON and a second return (a stale
 * diff artifact).  The intended replacement — the inlined access with
 * the cheaper VM_BUG_ON sanity check — is kept; the dead code and the
 * accessor call are removed.
 */
static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	VM_BUG_ON(!PageSlab(page));
	return page->slab_page;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
Expand Down Expand Up @@ -2918,8 +2899,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
nr_pages <<= cache->gfporder;

do {
page_set_cache(page, cache);
page_set_slab(page, slab);
page->slab_cache = cache;
page->slab_page = slab;
page++;
} while (--nr_pages);
}
Expand Down Expand Up @@ -3057,7 +3038,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp);
page = virt_to_head_page(objp);

slabp = page_get_slab(page);
slabp = page->slab_page;

if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp);
Expand Down Expand Up @@ -3261,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
struct slab *slabp;
unsigned objnr;

slabp = page_get_slab(virt_to_head_page(objp));
slabp = virt_to_head_page(objp)->slab_page;
objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
}
Expand Down

0 comments on commit 3502608

Please sign in to comment.