Commit 828771a

---
r: 347029
b: refs/heads/master
c: 107dab5
h: refs/heads/master
i:
  347027: 8b37aed
v: v3
Glauber Costa authored and Linus Torvalds committed Dec 18, 2012
1 parent e3b2b7b commit 828771a
Showing 3 changed files with 77 additions and 2 deletions.
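
The graft metadata above carries no changelog, so a brief orientation based on the diff itself: the patch makes a sysfs attribute write on a root SLUB cache fan out to its per-memcg child caches, and makes child caches created later inherit whatever attributes have already been tuned on the root. The snippet below is not part of the commit; it is a minimal userspace sketch, with a hypothetical cache name and attribute, of the kind of write whose effect would now reach the child caches as well.

/*
 * Illustration only, not from this commit: store a SLUB sysfs attribute.
 * The cache ("dentry") and attribute ("min_partial") are example names;
 * with this patch, a store on the root cache is also replayed on every
 * per-memcg child cache by slab_attr_store().
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/dentry/min_partial";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("5\n", f);	/* the written length also updates max_attr_size */
	return fclose(f) ? 1 : 0;
}
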
[refs]: 2 changes (1 addition & 1 deletion)
@@ -1,2 +1,2 @@
---
refs/heads/master: 943a451a87d229ca564a27274b58eaeae35fde5d
refs/heads/master: 107dab5c92d5f9c3afe962036e47c207363255c7
trunk/include/linux/slub_def.h: 1 change (1 addition & 0 deletions)
@@ -103,6 +103,7 @@ struct kmem_cache {
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
	int max_attr_size; /* for propagation, maximum size of a stored attr */
#endif

#ifdef CONFIG_NUMA
trunk/mm/slub.c: 76 changes (75 additions & 1 deletion)
@@ -201,13 +201,14 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

static void memcg_propagate_slab_attrs(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }

static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
@@ -3865,6 +3866,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
	if (slab_state <= UP)
		return 0;

	memcg_propagate_slab_attrs(s);
	mutex_unlock(&slab_mutex);
	err = sysfs_slab_add(s);
	mutex_lock(&slab_mutex);
@@ -5098,10 +5100,82 @@ static ssize_t slab_attr_store(struct kobject *kobj,
		return -EIO;

	err = attribute->store(s, buf, len);
#ifdef CONFIG_MEMCG_KMEM
	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
		int i;

		mutex_lock(&slab_mutex);
		if (s->max_attr_size < len)
			s->max_attr_size = len;

		for_each_memcg_cache_index(i) {
			struct kmem_cache *c = cache_from_memcg(s, i);
			/*
			 * This function's return value is determined by the
			 * parent cache only
			 */
			if (c)
				attribute->store(c, buf, len);
		}
		mutex_unlock(&slab_mutex);
	}
#endif
	return err;
}

static void memcg_propagate_slab_attrs(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG_KMEM
	int i;
	char *buffer = NULL;

	if (!is_root_cache(s))
		return;

	/*
	 * This means this cache had no attribute written. Therefore, no point
	 * in copying default values around
	 */
	if (!s->max_attr_size)
		return;

	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
		char mbuf[64];
		char *buf;
		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);

		if (!attr || !attr->store || !attr->show)
			continue;

		/*
		 * It is really bad that we have to allocate here, so we will
		 * do it only as a fallback. If we actually allocate, though,
		 * we can just use the allocated buffer until the end.
		 *
		 * Most of the slub attributes will tend to be very small in
		 * size, but sysfs allows buffers up to a page, so they can
		 * theoretically happen.
		 */
		if (buffer)
			buf = buffer;
		else if (s->max_attr_size < ARRAY_SIZE(mbuf))
			buf = mbuf;
		else {
			buffer = (char *) get_zeroed_page(GFP_KERNEL);
			if (WARN_ON(!buffer))
				continue;
			buf = buffer;
		}

		attr->show(s->memcg_params->root_cache, buf);
		attr->store(s, buf, strlen(buf));
	}

	if (buffer)
		free_page((unsigned long)buffer);
#endif
}

static const struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show,
.store = slab_attr_store,
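
A few notes on the mm/slub.c changes follow, each with a small stand-alone sketch; none of the sketches is kernel code, and the names in them are invented for illustration. First, the declaration hunk follows the usual pattern for optional functionality: when sysfs support is built in, the new helper gets a real prototype, and otherwise an empty static inline stub, so the caller in __kmem_cache_create() needs no #ifdef of its own:

/* Stand-alone sketch of the prototype-vs-stub idiom; FEATURE_X,
 * struct widget and feature_x_hook() are invented names. */
#include <stdio.h>

struct widget { int id; };

#ifdef FEATURE_X
void feature_x_hook(struct widget *w);			/* real version built elsewhere */
#else
static inline void feature_x_hook(struct widget *w) { }	/* no-op when disabled */
#endif

static void create_widget(struct widget *w)
{
	feature_x_hook(w);		/* call site stays free of #ifdef */
	printf("widget %d created\n", w->id);
}

int main(void)
{
	struct widget w = { .id = 1 };

	create_widget(&w);
	return 0;
}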
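
Second, the new branch in slab_attr_store() is the write-time half of the propagation: if sysfs is fully up, the store on the root cache succeeded and the cache is a root cache, the same buffer is replayed on every memcg child cache, while max_attr_size records the largest length ever stored so later propagation knows how big a scratch buffer it may need; the return value still comes from the root cache alone. Below is a minimal userspace model of that fan-out, with toy_cache and toy_store() standing in for kmem_cache and the attribute's store hook:

#include <stdio.h>

#define MAX_CHILDREN 4

/* Toy stand-ins for kmem_cache and a sysfs attribute's store() hook. */
struct toy_cache {
	char value[64];					/* pretend attribute storage */
	int max_attr_size;				/* mirrors s->max_attr_size */
	struct toy_cache *children[MAX_CHILDREN];	/* per-memcg child caches */
};

static int toy_store(struct toy_cache *c, const char *buf, int len)
{
	snprintf(c->value, sizeof(c->value), "%.*s", len, buf);
	return len;
}

/* Models the CONFIG_MEMCG_KMEM branch added to slab_attr_store(). */
static int toy_attr_store(struct toy_cache *root, const char *buf, int len)
{
	int err = toy_store(root, buf, len);
	int i;

	if (err >= 0) {
		if (root->max_attr_size < len)
			root->max_attr_size = len;
		for (i = 0; i < MAX_CHILDREN; i++)
			if (root->children[i])		/* replay on each child */
				toy_store(root->children[i], buf, len);
	}
	return err;	/* return value comes from the root cache only */
}

int main(void)
{
	struct toy_cache child = { 0 }, root = { 0 };

	root.children[0] = &child;
	toy_attr_store(&root, "5", 1);
	printf("root=%s child=%s max=%d\n", root.value, child.value,
	       root.max_attr_size);
	return 0;
}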

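Finally, memcg_propagate_slab_attrs() covers the other direction: when a child cache is created after the root has already been tuned, each attribute is show()n from the root cache into a scratch buffer and store()d into the new cache, and max_attr_size decides whether a small on-stack buffer suffices or a zeroed page must be allocated as a fallback (and then reused for the remaining attributes). A compressed sketch of just that buffer-sizing decision, with calloc() standing in for get_zeroed_page() and invented names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the scratch-buffer choice in memcg_propagate_slab_attrs();
 * TOY_PAGE_SIZE, root_value and fallback are invented stand-ins. */
#define TOY_PAGE_SIZE 4096

static void propagate_one(const char *root_value, char *child_value,
			  size_t child_sz, int max_attr_size, char **fallback)
{
	char mbuf[64];
	char *buf;

	if (*fallback)					/* page already allocated: reuse it */
		buf = *fallback;
	else if (max_attr_size < (int)sizeof(mbuf))
		buf = mbuf;				/* common case: small value, stack buffer */
	else {
		*fallback = calloc(1, TOY_PAGE_SIZE);	/* stands in for get_zeroed_page() */
		if (!*fallback)
			return;
		buf = *fallback;
	}

	strcpy(buf, root_value);			/* "show" the root cache's value */
	snprintf(child_value, child_sz, "%s", buf);	/* "store" it into the new child */
}

int main(void)
{
	char child[64] = "";
	char *page = NULL;

	propagate_one("5", child, sizeof(child), 1, &page);
	printf("child inherited: %s\n", child);
	free(page);
	return 0;
}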