slab: overloading the RCU head over the LRU for RCU free
With build-time size checking, we can overload the RCU head over the LRU
of struct page to free the pages of a slab in RCU context. This is a real
help toward overloading struct slab over struct page, which eventually
reduces the memory usage and cache footprint of SLAB.
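
For illustration only (not part of this commit), here is a minimal user-space C sketch of the pattern, with hypothetical names (fake_page, fake_rcu_free): a compile-time size check guarantees that the rcu_head fits in the storage of the lru field, that same storage is then reused to queue a deferred-free callback, and the callback recovers the enclosing page with container_of(). In the kernel, the size check is BUILD_BUG_ON() and the callback is queued with call_rcu().

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Cut-down stand-ins for the kernel types involved. */
struct list_head { struct list_head *next, *prev; };
struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_page {
	void *slab_cache;                  /* cache that owns this "page" */
	union {                            /* one storage slot, two meanings:   */
		struct list_head lru;      /* LRU linkage in normal use         */
		struct rcu_head rcu_head;  /* callback state while being freed  */
	};
};

static void fake_rcu_free(struct rcu_head *head)
{
	/* Recover the enclosing page from the overloaded field. */
	struct fake_page *page = container_of(head, struct fake_page, rcu_head);

	printf("freeing pages owned by cache %p\n", page->slab_cache);
}

int main(void)
{
	/* Kernel equivalent: BUILD_BUG_ON(sizeof(page->lru) < sizeof(struct rcu_head)) */
	static_assert(sizeof(struct rcu_head) <= sizeof(struct list_head),
		      "rcu_head must fit in the space of the lru field");

	int dummy_cache;
	struct fake_page page = { .slab_cache = &dummy_cache };

	page.rcu_head.func = fake_rcu_free;  /* kernel: call_rcu(&page->rcu_head, kmem_rcu_free) */
	page.rcu_head.func(&page.rcu_head);  /* the kernel runs this after an RCU grace period */
	return 0;
}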

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
Joonsoo Kim authored and Pekka Enberg committed Oct 24, 2013
1 parent 07d417a commit 6812670
Showing 3 changed files with 41 additions and 39 deletions.
3 changes: 3 additions & 0 deletions include/linux/mm_types.h
@@ -130,6 +130,9 @@ struct page {

struct list_head list; /* slobs list of pages */
struct slab *slab_page; /* slab fields */
struct rcu_head rcu_head; /* Used by SLAB
* when destroying via RCU
*/
};

/* Remainder is not double word aligned */
9 changes: 8 additions & 1 deletion include/linux/slab.h
@@ -51,7 +51,14 @@
* }
* rcu_read_unlock();
*
* See also the comment on struct slab_rcu in mm/slab.c.
* This is useful if we need to approach a kernel structure obliquely,
* from its address obtained without the usual locking. We can lock
* the structure to stabilize it and check it's still at the given address,
* only if we can be sure that the memory has not been meanwhile reused
* for some other kind of object (which our subsystem's lock might corrupt).
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*/
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
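
Editorial aside (not part of the diff): a hedged sketch of the caller-side pattern the comment above describes, with hypothetical names (my_obj, my_hash_lookup, my_get_locked). Because a SLAB_DESTROY_BY_RCU cache only reuses a freed object's memory for another object of the same cache until a grace period has passed, a lockless lookup may return a recycled object; the caller therefore locks it and rechecks its identity before trusting it.

/* Hedged sketch, not from the kernel tree: the "my_" names are
 * hypothetical, the locking/RCU calls are the real kernel APIs.
 * The cache is assumed to be created elsewhere with
 * kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *                   SLAB_DESTROY_BY_RCU, NULL);
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t	lock;
	int		key;
};

/* Hypothetical lockless lookup; it may hand back memory that is being
 * freed and recycled as a different my_obj at the same time. */
struct my_obj *my_hash_lookup(int key);

static struct my_obj *my_get_locked(int key)
{
	struct my_obj *obj;

	rcu_read_lock();                 /* keeps the memory typed as my_obj */
	obj = my_hash_lookup(key);
	if (obj) {
		spin_lock(&obj->lock);   /* stabilize the object ...           */
		if (obj->key != key) {   /* ... then recheck its identity; the */
			spin_unlock(&obj->lock); /* memory may have been       */
			obj = NULL;              /* recycled in the meantime   */
		}
	}
	rcu_read_unlock();
	return obj;                      /* locked object, or NULL */
}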
68 changes: 30 additions & 38 deletions mm/slab.c
Expand Up @@ -188,25 +188,6 @@ typedef unsigned int kmem_bufctl_t;
#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)

/*
* struct slab_rcu
*
* slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
* arrange for kmem_freepages to be called via RCU. This is useful if
* we need to approach a kernel structure obliquely, from its address
* obtained without the usual locking. We can lock the structure to
* stabilize it and check it's still at the given address, only if we
* can be sure that the memory has not been meanwhile reused for some
* other kind of object (which our subsystem's lock might corrupt).
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*/
struct slab_rcu {
struct rcu_head head;
struct page *page;
};

/*
* struct slab
*
@@ -215,14 +196,11 @@ struct slab_rcu {
* Slabs are chained into three lists: fully used, partial, fully free slabs.
*/
struct slab {
union {
struct {
struct list_head list;
void *s_mem; /* including colour offset */
unsigned int inuse; /* num of objs active in slab */
kmem_bufctl_t free;
};
struct slab_rcu __slab_cover_slab_rcu;
struct {
struct list_head list;
void *s_mem; /* including colour offset */
unsigned int inuse; /* num of objs active in slab */
kmem_bufctl_t free;
};
};

@@ -1509,6 +1487,8 @@ void __init kmem_cache_init(void)
{
int i;

BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot;
setup_node_pointer(kmem_cache);

@@ -1822,12 +1802,13 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)

static void kmem_rcu_free(struct rcu_head *head)
{
struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
struct kmem_cache *cachep = slab_rcu->page->slab_cache;
struct kmem_cache *cachep;
struct page *page;

kmem_freepages(cachep, slab_rcu->page);
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slab_rcu);
page = container_of(head, struct page, rcu_head);
cachep = page->slab_cache;

kmem_freepages(cachep, page);
}

#if DEBUG
@@ -2048,16 +2029,27 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)

slab_destroy_debugcheck(cachep, slabp);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu;
struct rcu_head *head;

/*
* RCU free overloads the RCU head over the LRU.
* slab_page has been overloaded over the LRU,
* but it is not used from now on, so we
* can use it safely.
*/
head = (void *)&page->rcu_head;
call_rcu(head, kmem_rcu_free);

slab_rcu = (struct slab_rcu *)slabp;
slab_rcu->page = page;
call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
kmem_freepages(cachep, page);
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slabp);
}

/*
* From now on, we don't use the slab management structure,
* although the actual page can be freed in RCU context
*/
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slabp);
}

/**
