---
r: 365465
b: refs/heads/master
c: ca0dde9
h: refs/heads/master
i:
  365463: 5a7d170
v: v3
Li Zefan authored and Linus Torvalds committed Apr 29, 2013
1 parent ccacfa5 commit 4d3419a
Showing 2 changed files with 34 additions and 31 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ebff7d8f270d045338d9f4796014f4db429a17f9
+refs/heads/master: ca0dde97178e75ed1370b8616326f5496a803d65
trunk/mm/memcontrol.c: 63 changes (33 additions, 30 deletions)
@@ -3483,20 +3483,15 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
- * Called with rcu_read_lock.
  */
 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
 					 struct kmem_cache *cachep)
 {
 	struct create_work *cw;
 
 	cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
-	if (cw == NULL)
-		return;
-
-	/* The corresponding put will be done in the workqueue. */
-	if (!css_tryget(&memcg->css)) {
-		kfree(cw);
+	if (cw == NULL) {
+		css_put(&memcg->css);
 		return;
 	}
 
@@ -3552,10 +3547,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 
 	rcu_read_lock();
 	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
-	rcu_read_unlock();
 
 	if (!memcg_can_account_kmem(memcg))
-		return cachep;
+		goto out;
 
 	idx = memcg_cache_id(memcg);
 
@@ -3564,29 +3558,38 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 	 * code updating memcg_caches will issue a write barrier to match this.
 	 */
 	read_barrier_depends();
-	if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
-		/*
-		 * If we are in a safe context (can wait, and not in interrupt
-		 * context), we could be be predictable and return right away.
-		 * This would guarantee that the allocation being performed
-		 * already belongs in the new cache.
-		 *
-		 * However, there are some clashes that can arrive from locking.
-		 * For instance, because we acquire the slab_mutex while doing
-		 * kmem_cache_dup, this means no further allocation could happen
-		 * with the slab_mutex held.
-		 *
-		 * Also, because cache creation issue get_online_cpus(), this
-		 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
-		 * that ends up reversed during cpu hotplug. (cpuset allocates
-		 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
-		 * better to defer everything.
-		 */
-		memcg_create_cache_enqueue(memcg, cachep);
-		return cachep;
+	if (likely(cachep->memcg_params->memcg_caches[idx])) {
+		cachep = cachep->memcg_params->memcg_caches[idx];
+		goto out;
 	}
 
-	return cachep->memcg_params->memcg_caches[idx];
+	/* The corresponding put will be done in the workqueue. */
+	if (!css_tryget(&memcg->css))
+		goto out;
+	rcu_read_unlock();
+
+	/*
+	 * If we are in a safe context (can wait, and not in interrupt
+	 * context), we could be be predictable and return right away.
+	 * This would guarantee that the allocation being performed
+	 * already belongs in the new cache.
+	 *
+	 * However, there are some clashes that can arrive from locking.
+	 * For instance, because we acquire the slab_mutex while doing
+	 * kmem_cache_dup, this means no further allocation could happen
+	 * with the slab_mutex held.
+	 *
+	 * Also, because cache creation issue get_online_cpus(), this
+	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
+	 * that ends up reversed during cpu hotplug. (cpuset allocates
+	 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
+	 * better to defer everything.
+	 */
+	memcg_create_cache_enqueue(memcg, cachep);
+	return cachep;
+out:
+	rcu_read_unlock();
+	return cachep;
 }
 EXPORT_SYMBOL(__memcg_kmem_get_cache);
 
