mm/sl*b: Differentiate struct slab fields by sl*b implementations
With a struct slab definition separate from struct page, we can go
further and define only the fields that the chosen sl*b implementation
uses. This means everything between the __page_flags and
__page_refcount placeholders now depends on the chosen CONFIG_SL*B.
Some fields exist in all implementations (slab_list) but can be part of
a union in some of them, so it's simpler to repeat them than to
complicate the definition with even more ifdefs.
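
As a rough stand-alone illustration of the pattern (a sketch only - struct
slab_sketch and its simplified field set are invented for this example, not
the kernel definition), the middle of the struct is selected entirely by
which CONFIG_ macro is defined, and SLAB_MATCH()-style compile-time checks
keep the shared placeholders pinned:

/* sketch.c - illustrative only; compile with e.g. cc -DCONFIG_SLUB sketch.c */
#include <stddef.h>
#include <stdio.h>

struct kmem_cache;			/* opaque stand-in, never dereferenced */

struct slab_sketch {
	unsigned long __page_flags;	/* placeholder shared by all variants */

#if defined(CONFIG_SLAB)
	struct kmem_cache *slab_cache;
	void *freelist;			/* array of free object indexes */
	void *s_mem;			/* first object */
	unsigned int active;
#elif defined(CONFIG_SLUB)
	struct kmem_cache *slab_cache;
	void *freelist;			/* first free object */
	unsigned long counters;
#elif defined(CONFIG_SLOB)
	void *freelist;			/* first free block */
	int units;
#else
#error "define one of CONFIG_SLAB / CONFIG_SLUB / CONFIG_SLOB"
#endif

	int __page_refcount;		/* placeholder shared by all variants */
};

/*
 * Same spirit as the kernel's SLAB_MATCH() checks: layout mistakes become
 * build errors instead of runtime corruption (the 64-byte budget is only
 * an illustrative stand-in for sizeof(struct page)).
 */
_Static_assert(offsetof(struct slab_sketch, __page_flags) == 0,
	       "__page_flags must stay first in every variant");
_Static_assert(sizeof(struct slab_sketch) <= 64,
	       "every variant must fit the same fixed budget");

int main(void)
{
	printf("this variant needs %zu bytes\n", sizeof(struct slab_sketch));
	return 0;
}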

The patch doesn't change the physical offsets of the fields, although
that could be done later - for example, it's now clear that tighter
packing would be possible in SLOB.

This should also prevent accidental use of fields that don't exist in
a given implementation. Before this patch, virt_to_cache() and
cache_from_obj() were visible for SLOB (albeit not used), although they
rely on the slab_cache field that isn't set by SLOB. With this patch,
referencing that field would be a compile error, so these functions are
now hidden behind #ifndef CONFIG_SLOB.
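
A self-contained sketch of that effect (illustrative names only - struct
slab_like and cache_of() are stand-ins, not the kernel helpers): once the
SLOB-flavoured layout simply has no slab_cache member, any helper that
reads it has to be compiled out as well, and forgetting to do so fails the
build instead of silently reading a field that was never set:

#include <stdio.h>

struct kmem_cache;			/* opaque stand-in for the real type */

struct slab_like {
#ifndef CONFIG_SLOB
	struct kmem_cache *slab_cache;	/* absent in the SLOB-flavoured layout */
#endif
	void *freelist;
};

#ifndef CONFIG_SLOB
/* Only provided when slab_cache exists; under CONFIG_SLOB it is hidden entirely. */
static struct kmem_cache *cache_of(const struct slab_like *slab)
{
	return slab->slab_cache;
}
#endif

int main(void)
{
	struct slab_like slab = { 0 };

#ifndef CONFIG_SLOB
	printf("cache: %p\n", (void *)cache_of(&slab));
#else
	(void)slab;	/* no slab_cache and no cache_of() in this build */
	printf("SLOB-flavoured build: helper compiled out\n");
#endif
	return 0;
}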

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Marco Elver <elver@google.com> # kfence
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
Vlastimil Babka committed Jan 6, 2022
1 parent 8dae0cf commit 401fb12
Showing 2 changed files with 43 additions and 14 deletions.
9 changes: 5 additions & 4 deletions mm/kfence/core.c
@@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
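
A side note on why the hunk above moves from IS_ENABLED() to preprocessor
conditionals: IS_ENABLED() only folds the branch away after parsing, so
slab->objects and slab->s_mem would still have to type-check even in
configurations whose struct slab no longer has those members. A minimal
stand-alone sketch of the distinction (struct widget and HAVE_COUNT are
invented for illustration):

struct widget {
#ifdef HAVE_COUNT
	int count;			/* only present in some configurations */
#endif
	int id;
};

static void widget_init(struct widget *w)
{
	/*
	 * A runtime-constant guard is not enough here: even in the dead
	 * branch, "w->count" must still type-check, so
	 *
	 *	if (0)
	 *		w->count = 1;
	 *
	 * breaks the build whenever HAVE_COUNT is not defined. The
	 * preprocessor form removes the reference before the compiler
	 * ever sees it:
	 */
#ifdef HAVE_COUNT
	w->count = 1;
#endif
	w->id = 0;
}

int main(void)
{
	struct widget w;

	widget_init(&w);
	return 0;
}
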
48 changes: 38 additions & 10 deletions mm/slab.h
@@ -8,35 +8,57 @@
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
 	union {
 		struct list_head slab_list;
-		struct {	/* Partial pages */
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void *s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */
 #else
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void *__unused_1;
+	void *freelist;		/* first free block */
+	void *__unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
@@ -48,10 +70,14 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 SLAB_MATCH(slab_list, slab_list);
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
 SLAB_MATCH(slab_cache, slab_cache);
+#endif
+#ifdef CONFIG_SLAB
 SLAB_MATCH(s_mem, s_mem);
 SLAB_MATCH(active, active);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
@@ -599,6 +625,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -645,6 +672,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {
