
Commit

---
---
r: 319576
b: refs/heads/master
c: 18004c5
h: refs/heads/master
v: v3
Christoph Lameter authored and Pekka Enberg committed Jul 9, 2012
1 parent f3fce7d commit 361bceb
Showing 5 changed files with 83 additions and 87 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 97d06609158e61f6bdf538c4a6788e2de492236f
refs/heads/master: 18004c5d4084d965aa1396392706b8688306427a
108 changes: 51 additions & 57 deletions trunk/mm/slab.c
@@ -68,7 +68,7 @@
* Further notes from the original documentation:
*
* 11 April '97. Started multi-threading - markhe
* The global cache-chain is protected by the mutex 'cache_chain_mutex'.
* The global cache-chain is protected by the mutex 'slab_mutex'.
* The sem is only needed when accessing/extending the cache-chain, which
* can never happen inside an interrupt (kmem_cache_create(),
* kmem_cache_shrink() and kmem_cache_reap()).
@@ -671,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
}
#endif

/*
* Guard access to the cache-chain.
*/
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1100,15 +1094,15 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
* When hotplugging memory or a cpu, existing nodelists are not replaced if
* already in use.
*
* Must hold cache_chain_mutex.
* Must hold slab_mutex.
*/
static int init_cache_nodelists_node(int node)
{
struct kmem_cache *cachep;
struct kmem_list3 *l3;
const int memsize = sizeof(struct kmem_list3);

list_for_each_entry(cachep, &cache_chain, list) {
list_for_each_entry(cachep, &slab_caches, list) {
/*
* Set up the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
@@ -1124,7 +1118,7 @@ static int init_cache_nodelists_node(int node)

/*
* The l3s don't come and go as CPUs come and
* go. cache_chain_mutex is sufficient
* go. slab_mutex is sufficient
* protection here.
*/
cachep->nodelists[node] = l3;
@@ -1146,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu)
int node = cpu_to_mem(cpu);
const struct cpumask *mask = cpumask_of_node(node);

list_for_each_entry(cachep, &cache_chain, list) {
list_for_each_entry(cachep, &slab_caches, list) {
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
@@ -1196,7 +1190,7 @@ static void __cpuinit cpuup_canceled(long cpu)
* the respective cache's slabs, now we can go ahead and
* shrink each nodelist to its limit.
*/
list_for_each_entry(cachep, &cache_chain, list) {
list_for_each_entry(cachep, &slab_caches, list) {
l3 = cachep->nodelists[node];
if (!l3)
continue;
@@ -1225,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
* Now we can go ahead with allocating the shared arrays and
* array caches
*/
list_for_each_entry(cachep, &cache_chain, list) {
list_for_each_entry(cachep, &slab_caches, list) {
struct array_cache *nc;
struct array_cache *shared = NULL;
struct array_cache **alien = NULL;
@@ -1293,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
err = cpuup_prepare(cpu);
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -1305,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/*
* Shutdown cache reaper. Note that the cache_chain_mutex is
* Shutdown cache reaper. Note that the slab_mutex is
* held so that if cache_reap() is invoked it cannot do
* anything expensive but will only modify reap_work
* and reschedule the timer.
@@ -1332,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
#endif
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
cpuup_canceled(cpu);
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
break;
}
return notifier_from_errno(err);
@@ -1350,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
* Returns -EBUSY if all objects cannot be drained so that the node is not
* removed.
*
* Must hold cache_chain_mutex.
* Must hold slab_mutex.
*/
static int __meminit drain_cache_nodelists_node(int node)
{
struct kmem_cache *cachep;
int ret = 0;

list_for_each_entry(cachep, &cache_chain, list) {
list_for_each_entry(cachep, &slab_caches, list) {
struct kmem_list3 *l3;

l3 = cachep->nodelists[node];
@@ -1388,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,

switch (action) {
case MEM_GOING_ONLINE:
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
ret = init_cache_nodelists_node(nid);
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
break;
case MEM_GOING_OFFLINE:
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
ret = drain_cache_nodelists_node(nid);
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
break;
case MEM_ONLINE:
case MEM_OFFLINE:
@@ -1499,8 +1493,8 @@ void __init kmem_cache_init(void)
node = numa_mem_id();

/* 1) create the cache_cache */
INIT_LIST_HEAD(&cache_chain);
list_add(&cache_cache.list, &cache_chain);
INIT_LIST_HEAD(&slab_caches);
list_add(&cache_cache.list, &slab_caches);
cache_cache.colour_off = cache_line_size();
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1642,11 +1636,11 @@ void __init kmem_cache_init_late(void)
init_lock_keys();

/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, list)
mutex_lock(&slab_mutex);
list_for_each_entry(cachep, &slab_caches, list)
if (enable_cpucache(cachep, GFP_NOWAIT))
BUG();
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);

/* Done! */
slab_state = FULL;
@@ -2253,10 +2247,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
*/
if (slab_is_available()) {
get_online_cpus();
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
}

list_for_each_entry(pc, &cache_chain, list) {
list_for_each_entry(pc, &slab_caches, list) {
char tmp;
int res;

@@ -2500,10 +2494,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
}

/* cache setup completed, link it into the list */
list_add(&cachep->list, &cache_chain);
list_add(&cachep->list, &slab_caches);
oops:
if (slab_is_available()) {
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
put_online_cpus();
}
return cachep;
@@ -2622,7 +2616,7 @@ static int drain_freelist(struct kmem_cache *cache,
return nr_freed;
}

/* Called with cache_chain_mutex held to protect against cpu hotplug */
/* Called with slab_mutex held to protect against cpu hotplug */
static int __cache_shrink(struct kmem_cache *cachep)
{
int ret = 0, i = 0;
@@ -2657,9 +2651,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
BUG_ON(!cachep || in_interrupt());

get_online_cpus();
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
ret = __cache_shrink(cachep);
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
put_online_cpus();
return ret;
}
@@ -2687,15 +2681,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)

/* Find the cache in the chain of caches. */
get_online_cpus();
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
/*
* the chain is never empty, cache_cache is never destroyed
*/
list_del(&cachep->list);
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
list_add(&cachep->list, &cache_chain);
mutex_unlock(&cache_chain_mutex);
list_add(&cachep->list, &slab_caches);
mutex_unlock(&slab_mutex);
put_online_cpus();
return;
}
@@ -2704,7 +2698,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
rcu_barrier();

__kmem_cache_destroy(cachep);
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4017,7 +4011,7 @@ static void do_ccupdate_local(void *info)
new->new[smp_processor_id()] = old;
}

/* Always called with the cache_chain_mutex held */
/* Always called with the slab_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp)
{
@@ -4061,7 +4055,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
return alloc_kmemlist(cachep, gfp);
}

/* Called with cache_chain_mutex held always */
/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
int err;
@@ -4163,11 +4157,11 @@ static void cache_reap(struct work_struct *w)
int node = numa_mem_id();
struct delayed_work *work = to_delayed_work(w);

if (!mutex_trylock(&cache_chain_mutex))
if (!mutex_trylock(&slab_mutex))
/* Give up. Setup the next iteration. */
goto out;

list_for_each_entry(searchp, &cache_chain, list) {
list_for_each_entry(searchp, &slab_caches, list) {
check_irq_on();

/*
@@ -4205,7 +4199,7 @@ static void cache_reap(struct work_struct *w)
cond_resched();
}
check_irq_on();
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
next_reap_node();
out:
/* Set up the next iteration */
@@ -4241,21 +4235,21 @@ static void *s_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;

mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
if (!n)
print_slabinfo_header(m);

return seq_list_start(&cache_chain, *pos);
return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &cache_chain, pos);
return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
}

static int s_show(struct seq_file *m, void *p)
@@ -4406,9 +4400,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
return -EINVAL;

/* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
res = -EINVAL;
list_for_each_entry(cachep, &cache_chain, list) {
list_for_each_entry(cachep, &slab_caches, list) {
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
@@ -4421,7 +4415,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
break;
}
}
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
if (res >= 0)
res = count;
return res;
@@ -4444,8 +4438,8 @@ static const struct file_operations proc_slabinfo_operations = {

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&cache_chain_mutex);
return seq_list_start(&cache_chain, *pos);
mutex_lock(&slab_mutex);
return seq_list_start(&slab_caches, *pos);
}

static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4544,17 +4538,17 @@ static int leaks_show(struct seq_file *m, void *p)
name = cachep->name;
if (n[0] == n[1]) {
/* Increase the buffer size */
mutex_unlock(&cache_chain_mutex);
mutex_unlock(&slab_mutex);
m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
if (!m->private) {
/* Too bad, we are really out */
m->private = n;
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
return -ENOMEM;
}
*(unsigned long *)m->private = n[0] * 2;
kfree(n);
mutex_lock(&cache_chain_mutex);
mutex_lock(&slab_mutex);
/* Now make sure this entry will be retried */
m->count = m->size;
return 0;
4 changes: 4 additions & 0 deletions trunk/mm/slab.h
@@ -23,6 +23,10 @@ enum slab_state {

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;
extern struct list_head slab_caches;

struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *));

2 changes: 2 additions & 0 deletions trunk/mm/slab_common.c
@@ -19,6 +19,8 @@
#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);

/*
* kmem_cache_create - Create a cache.
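The pattern this commit standardizes on, visible throughout the mm/slab.c hunks above, is to take slab_mutex, walk slab_caches, and release slab_mutex; the declarations live in mm/slab.h and the definitions in mm/slab_common.c, so the cache registry is shared by the slab allocators rather than private to mm/slab.c. The sketch below is illustrative only and is not part of the commit: count_kmem_caches() is a hypothetical helper, and it assumes the file is built inside mm/ so that the new declarations from the mm/slab.h hunk are visible.

/*
 * Illustrative sketch only (not part of this commit): serialize a walk of
 * the global cache list with the shared slab_mutex, mirroring the
 * list_for_each_entry() users converted in mm/slab.c above.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "slab.h"	/* slab_mutex, slab_caches (added by this commit) */

/* Hypothetical helper: count the registered caches under slab_mutex. */
static int count_kmem_caches(void)
{
	struct kmem_cache *cachep;
	int n = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(cachep, &slab_caches, list)
		n++;
	mutex_unlock(&slab_mutex);

	return n;
}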
