mm/slab: factor out kmem_cache_node initialization code
It can be reused in other places, so factor it out. A following patch will
use it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Joonsoo Kim authored and Linus Torvalds committed May 20, 2016
1 parent a5aa63a · commit ded0ecf
1 changed file: mm/slab.c (45 additions, 29 deletions)
@@ -848,6 +848,46 @@ static inline gfp_t gfp_exact_node(gfp_t flags)
 }
 #endif
 
+static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+{
+	struct kmem_cache_node *n;
+
+	/*
+	 * Set up the kmem_cache_node for cpu before we can
+	 * begin anything. Make sure some other cpu on this
+	 * node has not already allocated this
+	 */
+	n = get_node(cachep, node);
+	if (n) {
+		spin_lock_irq(&n->list_lock);
+		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+				cachep->num;
+		spin_unlock_irq(&n->list_lock);
+
+		return 0;
+	}
+
+	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
+	if (!n)
+		return -ENOMEM;
+
+	kmem_cache_node_init(n);
+	n->next_reap = jiffies + REAPTIMEOUT_NODE +
+		((unsigned long)cachep) % REAPTIMEOUT_NODE;
+
+	n->free_limit =
+		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
+
+	/*
+	 * The kmem_cache_nodes don't come and go as CPUs
+	 * come and go. slab_mutex is sufficient
+	 * protection here.
+	 */
+	cachep->node[node] = n;
+
+	return 0;
+}
+
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
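A note on the free_limit value that both branches of init_cache_node() compute: it caps the number of free objects the node may keep cached; beyond it, fully free slabs are handed back to the page allocator. With invented example numbers (not from this patch), a node with 4 CPUs, batchcount = 16, and num = 8 objects per slab gets free_limit = (1 + 4) * 16 + 8 = 88.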
@@ -859,39 +899,15 @@
  */
 static int init_cache_node_node(int node)
 {
+	int ret;
 	struct kmem_cache *cachep;
-	struct kmem_cache_node *n;
-	const size_t memsize = sizeof(struct kmem_cache_node);
 
 	list_for_each_entry(cachep, &slab_caches, list) {
-		/*
-		 * Set up the kmem_cache_node for cpu before we can
-		 * begin anything. Make sure some other cpu on this
-		 * node has not already allocated this
-		 */
-		n = get_node(cachep, node);
-		if (!n) {
-			n = kmalloc_node(memsize, GFP_KERNEL, node);
-			if (!n)
-				return -ENOMEM;
-			kmem_cache_node_init(n);
-			n->next_reap = jiffies + REAPTIMEOUT_NODE +
-			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
-
-			/*
-			 * The kmem_cache_nodes don't come and go as CPUs
-			 * come and go. slab_mutex is sufficient
-			 * protection here.
-			 */
-			cachep->node[node] = n;
-		}
-
-		spin_lock_irq(&n->list_lock);
-		n->free_limit =
-			(1 + nr_cpus_node(node)) *
-			cachep->batchcount + cachep->num;
-		spin_unlock_irq(&n->list_lock);
+		ret = init_cache_node(cachep, node, GFP_KERNEL);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
 }
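To make the factored-out shape easier to see outside the kernel, here is a minimal user-space C analogue of the get-or-create pattern that init_cache_node() implements. Everything in it is invented for illustration: the struct and function names, the pthread locking standing in for n->list_lock and slab_mutex, and the MAX_NODES bound. It is a sketch of the pattern, not kernel code.

	/* Illustrative user-space analogue; all names are hypothetical. */
	#include <pthread.h>
	#include <stdlib.h>

	#define MAX_NODES 8	/* invented bound for the example */

	struct node_state {
		pthread_mutex_t list_lock;	/* stands in for n->list_lock */
		int free_limit;
	};

	struct cache {
		struct node_state *node[MAX_NODES];
		int batchcount;
		int num;
	};

	/*
	 * Mirrors the shape of init_cache_node(): if the per-node state
	 * already exists, only refresh free_limit under the node lock;
	 * otherwise allocate, initialize, and publish it. Publication is
	 * assumed to be serialized by a caller-held mutex (the analogue
	 * of slab_mutex), so the helper is safe to call either way.
	 */
	static int init_node_state(struct cache *c, int node, int cpus_on_node)
	{
		struct node_state *n = c->node[node];

		if (n) {
			pthread_mutex_lock(&n->list_lock);
			n->free_limit = (1 + cpus_on_node) * c->batchcount + c->num;
			pthread_mutex_unlock(&n->list_lock);
			return 0;
		}

		n = malloc(sizeof(*n));
		if (!n)
			return -1;	/* the kernel version returns -ENOMEM */

		pthread_mutex_init(&n->list_lock, NULL);
		n->free_limit = (1 + cpus_on_node) * c->batchcount + c->num;
		c->node[node] = n;	/* publish the new per-node state */

		return 0;
	}

	int main(void)
	{
		/* Single-threaded here, so no external mutex is needed. */
		struct cache c = { .batchcount = 16, .num = 8 };

		return init_node_state(&c, 0, 4);	/* free_limit becomes 88 */
	}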
