Commit fb3e0ed

---
r: 347022
b: refs/heads/master
c: b9ce5ef
h: refs/heads/master
v: v3
Glauber Costa authored and Linus Torvalds committed Dec 18, 2012
1 parent 3c8508a commit fb3e0ed
Showing 6 changed files with 54 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0e9d92f2d02d8c8320f0502307c688d07bdac2b3
+refs/heads/master: b9ce5ef49f00daf2254c6953c8d31f79aabccd34
5 changes: 5 additions & 0 deletions trunk/include/linux/memcontrol.h
@@ -554,6 +554,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 	return __memcg_kmem_get_cache(cachep, gfp);
 }
 #else
+static inline bool memcg_kmem_enabled(void)
+{
+	return false;
+}
+
 static inline bool
 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 {
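The new stub gives !CONFIG_MEMCG_KMEM builds a memcg_kmem_enabled() that is a compile-time constant, so generic code (such as cache_from_obj() added to mm/slab.h below) can test it unconditionally and have the whole branch optimized away. A minimal userspace sketch of the same pattern, assuming an optimizing compiler; the names here are illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the !CONFIG_MEMCG_KMEM stub: a constant-false inline. */
static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static const char *expensive_lookup(void)
{
	return "per-memcg cache";
}

int main(void)
{
	/* Because memcg_kmem_enabled() folds to 'false', the compiler can
	 * drop this branch and expensive_lookup() from the object code. */
	if (memcg_kmem_enabled())
		puts(expensive_lookup());
	else
		puts("root cache");
	return 0;
}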
6 changes: 5 additions & 1 deletion trunk/mm/slab.c
@@ -87,7 +87,6 @@
  */
 
 #include <linux/slab.h>
-#include "slab.h"
 #include <linux/mm.h>
 #include <linux/poison.h>
 #include <linux/swap.h>
@@ -128,6 +127,8 @@
 
 #include "internal.h"
 
+#include "slab.h"
+
 /*
  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *		  0 for faster, smaller code (especially in the critical paths).
@@ -3883,6 +3884,9 @@ EXPORT_SYMBOL(__kmalloc);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
+	cachep = cache_from_obj(cachep, objp);
+	if (!cachep)
+		return;
 
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
39 changes: 39 additions & 0 deletions trunk/mm/slab.h
@@ -116,6 +116,13 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 	return (is_root_cache(cachep) && !memcg) ||
 		(cachep->memcg_params->memcg == memcg);
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+				      struct kmem_cache *p)
+{
+	return (p == s) ||
+		(s->memcg_params && (p == s->memcg_params->root_cache));
+}
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
 {
@@ -127,5 +134,37 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 {
 	return true;
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+				      struct kmem_cache *p)
+{
+	return true;
+}
 #endif
+
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+	struct kmem_cache *cachep;
+	struct page *page;
+
+	/*
+	 * When kmemcg is not being used, both assignments should return the
+	 * same value. but we don't want to pay the assignment price in that
+	 * case. If it is not compiled in, the compiler should be smart enough
+	 * to not do even the assignment. In that case, slab_equal_or_root
+	 * will also be a constant.
+	 */
+	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
+		return s;
+
+	page = virt_to_head_page(x);
+	cachep = page->slab_cache;
+	if (slab_equal_or_root(cachep, s))
+		return cachep;
+
+	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+		__FUNCTION__, cachep->name, s->name);
+	WARN_ON_ONCE(1);
+	return s;
+}
 #endif
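cache_from_obj() is the heart of the change: instead of trusting the cache pointer the caller passed in, it recovers the owning cache from the object's page (page->slab_cache) and falls back to the passed-in cache with a warning when the two disagree. A compressed userspace model of that mismatch check, with the per-page cache pointer modeled as a per-object field; all names are illustrative, not kernel API:

#include <stdio.h>

struct kmem_cache { const char *name; };

/* Model: record the owning cache with each object; the kernel records it
 * once per slab page, in page->slab_cache. */
struct obj {
	struct kmem_cache *owner;
};

static struct kmem_cache *cache_from_obj(struct kmem_cache *s, struct obj *x)
{
	struct kmem_cache *cachep = x->owner;	/* kernel: virt_to_head_page(x)->slab_cache */

	if (cachep == s)			/* kernel: slab_equal_or_root(cachep, s) */
		return cachep;

	fprintf(stderr, "%s: Wrong slab cache. %s but object is from %s\n",
		__func__, cachep->name, s->name);
	return s;
}

int main(void)
{
	struct kmem_cache a = { "dentry" }, b = { "inode_cache" };
	struct obj o = { &a };

	cache_from_obj(&a, &o);	/* match: free goes to the cache owning the page */
	cache_from_obj(&b, &o);	/* mismatch: warns, as SLAB_DEBUG_FREE would */
	return 0;
}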
2 changes: 1 addition & 1 deletion trunk/mm/slob.c
@@ -58,7 +58,6 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include "slab.h"
 
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
@@ -73,6 +72,7 @@
 
 #include <linux/atomic.h>
 
+#include "slab.h"
 /*
  * slob_block has a field 'units', which indicates size of block if +ve,
  * or offset of next block if -ve (in SLOB_UNITs).
15 changes: 3 additions & 12 deletions trunk/mm/slub.c
@@ -2611,19 +2611,10 @@ static __always_inline void slab_free(struct kmem_cache *s,
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	struct page *page;
-
-	page = virt_to_head_page(x);
-
-	if (kmem_cache_debug(s) && page->slab_cache != s) {
-		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
-			" is from %s\n", page->slab_cache->name, s->name);
-		WARN_ON_ONCE(1);
+	s = cache_from_obj(s, x);
+	if (!s)
 		return;
-	}
-
-	slab_free(s, page, x, _RET_IP_);
-
+	slab_free(s, virt_to_head_page(x), x, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
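With both SLAB and SLUB routed through cache_from_obj(), a caller can keep freeing with the cache pointer it created even when kmemcg transparently satisfied the allocation from a per-memcg child cache; the free is redirected to the cache that actually owns the page. A hypothetical kernel-style call site, shown as a sketch rather than standalone compilable code — struct foo, foo_cache, and foo_release() are assumptions, not part of this commit:

struct kmem_cache *foo_cache;	/* set up elsewhere via kmem_cache_create() */

void foo_release(struct foo *f)
{
	/*
	 * The caller always passes foo_cache, but if this task allocated
	 * from a per-memcg copy of it, cache_from_obj() inside
	 * kmem_cache_free() resolves virt_to_head_page(f)->slab_cache
	 * and frees to that cache instead.
	 */
	kmem_cache_free(foo_cache, f);
}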
