Commit d601663

---
r: 347024
b: refs/heads/master
c: 1f458cb
h: refs/heads/master
v: v3

Glauber Costa authored and Linus Torvalds committed Dec 18, 2012
1 parent e4e9568 commit d601663
Showing 7 changed files with 107 additions and 3 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
---
refs/heads/master: d79923fad95b0cdf7770e024677180c734cb7148
refs/heads/master: 1f458cbf122288b23620ee822e19bcbb76c8d6ec
trunk/include/linux/memcontrol.h: 2 changes (2 additions, 0 deletions)
@@ -453,6 +453,8 @@ void memcg_update_array_size(int num_groups);
struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);

/**
* memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
* @gfp: the gfp allocation flags.
trunk/include/linux/slab.h: 10 changes (9 additions, 1 deletion)
@@ -11,6 +11,8 @@

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
* Flags to pass to kmem_cache_create().
@@ -179,7 +181,6 @@ void kmem_cache_free(struct kmem_cache *, void *);
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
* This is the main placeholder for memcg-related information in kmem caches.
* struct kmem_cache will hold a pointer to it, so the memory cost while
@@ -197,6 +198,10 @@ void kmem_cache_free(struct kmem_cache *, void *);
* @memcg: pointer to the memcg this cache belongs to
* @list: list_head for the list of all caches in this memcg
* @root_cache: pointer to the global, root cache, this cache was derived from
* @dead: set to true after the memcg dies; the cache may still be around.
* @nr_pages: number of pages that belongs to this cache.
* @destroy: worker to be called whenever we are ready, or believe we may be
* ready, to destroy this cache.
*/
struct memcg_cache_params {
bool is_root_cache;
@@ -206,6 +211,9 @@ struct memcg_cache_params {
struct mem_cgroup *memcg;
struct list_head list;
struct kmem_cache *root_cache;
bool dead;
atomic_t nr_pages;
struct work_struct destroy;
};
};
};
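The three new members are the whole of the destruction state introduced by this patch. As a reading aid, here is an abbreviated view of the per-memcg arm of the struct after these hunks; the is_root_cache flag and the root-cache union arm are omitted, and the comments are editorial, not taken from the header:

struct memcg_cache_params {         /* abbreviated sketch, not the full layout */
	struct mem_cgroup *memcg;       /* memcg this cache belongs to */
	struct list_head list;          /* entry in memcg->memcg_slab_caches */
	struct kmem_cache *root_cache;  /* global cache this one was derived from */
	bool dead;                      /* set once the owning memcg goes offline */
	atomic_t nr_pages;              /* slab pages currently held by this cache */
	struct work_struct destroy;     /* deferred kmem_cache_destroy() worker */
};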
trunk/mm/memcontrol.c: 63 changes (63 additions, 0 deletions)
@@ -2779,6 +2779,19 @@ static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
(memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
}

/*
* This is a bit cumbersome, but it is rarely used and avoids a backpointer
* in the memcg_cache_params struct.
*/
static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
{
struct kmem_cache *cachep;

VM_BUG_ON(p->is_root_cache);
cachep = p->root_cache;
return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
}

static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
{
struct res_counter *fail_res;
@@ -3056,6 +3069,31 @@ static inline void memcg_resume_kmem_account(void)
current->memcg_kmem_skip_account--;
}

static void kmem_cache_destroy_work_func(struct work_struct *w)
{
struct kmem_cache *cachep;
struct memcg_cache_params *p;

p = container_of(w, struct memcg_cache_params, destroy);

cachep = memcg_params_to_cache(p);

if (!atomic_read(&cachep->memcg_params->nr_pages))
kmem_cache_destroy(cachep);
}

void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
{
if (!cachep->memcg_params->dead)
return;

/*
* We have to defer the actual destroying to a workqueue, because
* we might currently be in a context that cannot sleep.
*/
schedule_work(&cachep->memcg_params->destroy);
}

static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
{
char *name;
@@ -3125,6 +3163,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,

mem_cgroup_get(memcg);
new_cachep->memcg_params->root_cache = cachep;
atomic_set(&new_cachep->memcg_params->nr_pages , 0);

cachep->memcg_params->memcg_caches[idx] = new_cachep;
/*
@@ -3143,6 +3182,25 @@ struct create_work {
struct work_struct work;
};

static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
struct kmem_cache *cachep;
struct memcg_cache_params *params;

if (!memcg_kmem_is_active(memcg))
return;

mutex_lock(&memcg->slab_caches_mutex);
list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
cachep = memcg_params_to_cache(params);
cachep->memcg_params->dead = true;
INIT_WORK(&cachep->memcg_params->destroy,
kmem_cache_destroy_work_func);
schedule_work(&cachep->memcg_params->destroy);
}
mutex_unlock(&memcg->slab_caches_mutex);
}

static void memcg_create_cache_work_func(struct work_struct *w)
{
struct create_work *cw;
@@ -3358,6 +3416,10 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
VM_BUG_ON(mem_cgroup_is_root(memcg));
memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
}
#else
static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -5975,6 +6037,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

mem_cgroup_reparent_charges(memcg);
mem_cgroup_destroy_all_caches(memcg);
}

static void mem_cgroup_css_free(struct cgroup *cont)
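Taken together, the memcontrol.c changes implement deferred cache destruction: a per-memcg cache cannot be torn down the moment its memcg goes offline, because objects may still be live in it, so the cache is only marked dead and a worker is scheduled; the worker calls kmem_cache_destroy() only once the cache no longer holds any slab pages, and it is re-armed from mem_cgroup_destroy_cache() when the last page goes away. A condensed restatement of that flow, with the slab_caches_mutex locking, the memcg_kmem_is_active() check and error handling elided (comments are editorial):

/* 1. css_offline: mark every cache of the dying memcg and kick its worker. */
static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
	struct memcg_cache_params *params;

	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
		struct kmem_cache *cachep = memcg_params_to_cache(params);

		cachep->memcg_params->dead = true;
		INIT_WORK(&cachep->memcg_params->destroy,
			  kmem_cache_destroy_work_func);
		schedule_work(&cachep->memcg_params->destroy);
	}
}

/* 2. Worker: only destroy the cache once it holds no slab pages. */
static void kmem_cache_destroy_work_func(struct work_struct *w)
{
	struct memcg_cache_params *p =
		container_of(w, struct memcg_cache_params, destroy);
	struct kmem_cache *cachep = memcg_params_to_cache(p);

	if (!atomic_read(&cachep->memcg_params->nr_pages))
		kmem_cache_destroy(cachep);
}

/* 3. When the last slab page is freed, memcg_release_pages() (mm/slab.h,
 *    below) calls mem_cgroup_destroy_cache(), which re-schedules the same
 *    worker; nr_pages is now zero, so the cache is finally destroyed. */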
trunk/mm/slab.c: 3 changes (3 additions, 0 deletions)
@@ -1895,6 +1895,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
if (page->pfmemalloc)
SetPageSlabPfmemalloc(page + i);
}
memcg_bind_pages(cachep, cachep->gfporder);

if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1931,6 +1932,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
__ClearPageSlab(page);
page++;
}

memcg_release_pages(cachep, cachep->gfporder);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
trunk/mm/slab.h: 23 changes (23 additions, 0 deletions)
@@ -117,6 +117,21 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
(cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
if (!is_root_cache(s))
atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
if (is_root_cache(s))
return;

if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
struct kmem_cache *p)
{
@@ -135,6 +150,14 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
struct kmem_cache *p)
{
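memcg_bind_pages() and memcg_release_pages() keep nr_pages in step with the allocators: each slab allocation of a given order adds 1 << order pages, each slab free subtracts them, and atomic_sub_and_test() returns true only for the subtraction that reaches zero, so exactly one releaser sees the cache become empty and requests the deferred destruction described above. A toy illustration of that counter pattern (standalone kernel-style sketch; the toy_* names are made up for this example and are not part of the patch):

/* Toy sketch of the nr_pages accounting, not an actual kernel code path. */
static atomic_t toy_nr_pages = ATOMIC_INIT(0);

static void toy_bind_pages(int order)
{
	atomic_add(1 << order, &toy_nr_pages);    /* slab of 2^order pages allocated */
}

static void toy_release_pages(int order)
{
	/* atomic_sub_and_test() is true only when the counter hits zero,
	 * so only the final release triggers the destroy request. */
	if (atomic_sub_and_test(1 << order, &toy_nr_pages))
		pr_debug("last slab page released, cache can be destroyed\n");
}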
trunk/mm/slub.c: 7 changes (6 additions, 1 deletion)
@@ -1344,6 +1344,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
void *start;
void *last;
void *p;
int order;

BUG_ON(flags & GFP_SLAB_BUG_MASK);

@@ -1352,7 +1353,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!page)
goto out;

order = compound_order(page);
inc_slabs_node(s, page_to_nid(page), page->objects);
memcg_bind_pages(s, order);
page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
@@ -1361,7 +1364,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
start = page_address(page);

if (unlikely(s->flags & SLAB_POISON))
memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
memset(start, POISON_INUSE, PAGE_SIZE << order);

last = start;
for_each_object(p, s, start, page->objects) {
@@ -1402,6 +1405,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)

__ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);

memcg_release_pages(s, order);
reset_page_mapcount(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
