From df21a9c904e7cb4eacc97c9278464600394660ba Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Tue, 18 Dec 2012 14:22:27 -0800
Subject: [PATCH]

--- yaml ---
r: 347016
b: refs/heads/master
c: ba6c496ed834a37a26fc6fc87fc9aecb0fa0014d
h: refs/heads/master
v: v3
---
 [refs]                         |  2 +-
 trunk/include/linux/slab.h     | 24 ++++++++++++++++++++++++
 trunk/include/linux/slab_def.h |  3 +++
 trunk/include/linux/slub_def.h |  3 +++
 trunk/mm/slab.h                | 13 +++++++++++++
 5 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 9b37666e855d..615af2ea9dcc 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d5bdae7d59451b9d63303f7794ef32bb76ba6330
+refs/heads/master: ba6c496ed834a37a26fc6fc87fc9aecb0fa0014d
diff --git a/trunk/include/linux/slab.h b/trunk/include/linux/slab.h
index 743a10415122..00efba149222 100644
--- a/trunk/include/linux/slab.h
+++ b/trunk/include/linux/slab.h
@@ -176,6 +176,30 @@ void kmem_cache_free(struct kmem_cache *, void *);
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * This is the main placeholder for memcg-related information in kmem caches.
+ * struct kmem_cache will hold a pointer to it, so the memory cost while
+ * disabled is 1 pointer. The runtime cost while enabled is bigger than it
+ * would otherwise be if this were bundled in kmem_cache: we'll need an
+ * extra pointer chase. But the trade-off clearly lies in favor of not
+ * penalizing non-users.
+ *
+ * Both the root cache and the child caches will have it. For the root cache,
+ * this will hold a dynamically allocated array large enough to hold
+ * information about the currently limited memcgs in the system.
+ *
+ * Child caches will hold extra metadata needed for their operation. Fields are:
+ *
+ * @memcg: pointer to the memcg this cache belongs to
+ */
+struct memcg_cache_params {
+	bool is_root_cache;
+	union {
+		struct kmem_cache *memcg_caches[0];
+		struct mem_cgroup *memcg;
+	};
+};
+
 /*
  * Common kmalloc functions provided by all allocators
  */
diff --git a/trunk/include/linux/slab_def.h b/trunk/include/linux/slab_def.h
index 45c0356fdc8c..8bb6e0eaf3c6 100644
--- a/trunk/include/linux/slab_def.h
+++ b/trunk/include/linux/slab_def.h
@@ -81,6 +81,9 @@ struct kmem_cache {
 	 */
 	int obj_offset;
 #endif /* CONFIG_DEBUG_SLAB */
+#ifdef CONFIG_MEMCG_KMEM
+	struct memcg_cache_params *memcg_params;
+#endif
 
 /* 6) per-cpu/per-node data, touched during every alloc/free */
 	/*
diff --git a/trunk/include/linux/slub_def.h b/trunk/include/linux/slub_def.h
index df448adb7283..961e72eab907 100644
--- a/trunk/include/linux/slub_def.h
+++ b/trunk/include/linux/slub_def.h
@@ -101,6 +101,9 @@ struct kmem_cache {
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
+#ifdef CONFIG_MEMCG_KMEM
+	struct memcg_cache_params *memcg_params;
+#endif
 
 #ifdef CONFIG_NUMA
 	/*
diff --git a/trunk/mm/slab.h b/trunk/mm/slab.h
index 1cb9c9ee0e6f..49e7a8b1d27e 100644
--- a/trunk/mm/slab.h
+++ b/trunk/mm/slab.h
@@ -100,4 +100,17 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
+
+#ifdef CONFIG_MEMCG_KMEM
+static inline bool is_root_cache(struct kmem_cache *s)
+{
+	return !s->memcg_params || s->memcg_params->is_root_cache;
+}
+#else
+static inline bool is_root_cache(struct kmem_cache *s)
+{
+	return true;
+}
+
+#endif
 #endif
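
To see how the new structure is meant to be used, consider the union's two layouts:
a root cache treats the zero-length memcg_caches[] member as a variable-length tail
with one child-cache slot per limited memcg, while a child cache stores only the
memcg back-pointer, and is_root_cache() (plus the convention that a NULL
memcg_params means "root") tells the two apart. The standalone userspace sketch
below mirrors that layout under stated assumptions: struct mem_cgroup and struct
kmem_cache are reduced stand-ins, alloc_root_params()/alloc_child_params() are
hypothetical helpers, and indexing the array directly by a memcg id is purely
illustrative, not the kernel's actual allocation code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's struct mem_cgroup; only what the sketch needs. */
struct mem_cgroup { int id; };

struct kmem_cache;

struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];	/* root: one slot per limited memcg */
		struct mem_cgroup *memcg;		/* child: owning memcg */
	};
};

/* Stand-in for the kernel's struct kmem_cache, reduced to the new field. */
struct kmem_cache {
	const char *name;
	struct memcg_cache_params *memcg_params;
};

/* Same logic as the helper the patch adds to mm/slab.h: a cache that never
 * allocated memcg_params is, by definition, a root cache. */
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

/* Hypothetical helper: a root cache sizes its params as the fixed header plus
 * one child-cache slot per limited memcg, which is exactly what the
 * zero-length memcg_caches[] member allows. */
static struct memcg_cache_params *alloc_root_params(int nr_memcgs)
{
	struct memcg_cache_params *p =
		calloc(1, sizeof(*p) + nr_memcgs * sizeof(struct kmem_cache *));

	if (p)
		p->is_root_cache = true;
	return p;
}

/* Hypothetical helper: a child cache only records its owning memcg. */
static struct memcg_cache_params *alloc_child_params(struct mem_cgroup *memcg)
{
	struct memcg_cache_params *p = calloc(1, sizeof(*p));

	if (p) {
		p->is_root_cache = false;
		p->memcg = memcg;
	}
	return p;
}

int main(void)
{
	struct mem_cgroup cg = { .id = 3 };
	struct kmem_cache plain = { .name = "plain", .memcg_params = NULL };
	struct kmem_cache root  = { .name = "dentry" };
	struct kmem_cache child = { .name = "dentry(3:app)" };

	root.memcg_params  = alloc_root_params(8);
	child.memcg_params = alloc_child_params(&cg);
	if (!root.memcg_params || !child.memcg_params)
		return 1;

	/* The root cache tracks its per-memcg copy in the tail array... */
	root.memcg_params->memcg_caches[cg.id] = &child;
	/* ...and the child points back at the memcg that owns it. */
	printf("%s: root=%d\n", plain.name, is_root_cache(&plain));	/* 1 */
	printf("%s: root=%d\n", root.name,  is_root_cache(&root));	/* 1 */
	printf("%s: root=%d (memcg %d)\n", child.name,
	       is_root_cache(&child), child.memcg_params->memcg->id);	/* 0 */

	free(root.memcg_params);
	free(child.memcg_params);
	return 0;
}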
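
Note the design choice encoded in is_root_cache()'s NULL test: a cache that has
never had memcg_params allocated is treated as a root cache, so caches that never
serve a limited memcg pay only the one-pointer footprint the slab.h comment
describes, and the extra pointer chase is reserved for caches that actually
participate in memcg accounting.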