Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 227687
b: refs/heads/master
c: 4a92379
h: refs/heads/master
i:
  227685: c29071b
  227683: b43e79f
  227679: dc5d5bd
v: v3
  • Loading branch information
Richard Kennedy authored and Pekka Enberg committed Nov 6, 2010
1 parent 32f479f commit ad25efa
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 37 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 0d24db337e6d81c0c620ab65cc6947bd6553f742
refs/heads/master: 4a92379bdfb48680a5e6775dd53a586df7b6b0b1
55 changes: 26 additions & 29 deletions trunk/include/linux/slub_def.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,8 @@
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>
#include <linux/kmemleak.h>

enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
Expand Down Expand Up @@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

/*
 * kmalloc_order - allocate a large kmalloc object straight from the page
 * allocator as a compound page of the given order.
 *
 * @size:  requested allocation size in bytes (informational for kmemleak;
 *         the actual allocation is PAGE_SIZE << order)
 * @flags: gfp allocation flags; __GFP_COMP is OR-ed in so the pages form
 *         a single compound page
 * @order: page order to allocate
 *
 * Returns the allocated address, or NULL-equivalent (0) on failure from
 * __get_free_pages.  The object is registered with kmemleak so leak
 * detection covers page-allocator-backed kmalloc objects too.
 */
static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
kmemleak_alloc(ret, size, 1, flags);
return ret;
}

/*
 * Tracing hooks for SLUB allocations.
 *
 * With CONFIG_TRACING the *_trace variants are out-of-line in mm/slub.c and
 * emit the kmalloc tracepoint with the caller-requested @size; without it
 * they collapse to plain inline wrappers around the untraced allocators so
 * there is zero overhead.
 *
 * NOTE: the scraped diff had the stale pre-image declarations
 * (kmem_cache_alloc_notrace) interleaved here, which would redeclare the
 * symbol with a conflicting signature; only the post-image form is kept.
 */
#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

/*
 * kmalloc_large - allocate an object too big for the kmalloc slab caches.
 *
 * @size:  requested allocation size in bytes
 * @flags: gfp allocation flags
 *
 * Computes the page order covering @size and delegates to
 * kmalloc_order_trace(), which handles both the page allocation (with
 * kmemleak registration) and the kmalloc tracepoint when tracing is on.
 *
 * The merged diff residue (an inline __get_free_pages body followed by an
 * unreachable second return) is removed; only the post-image body remains.
 */
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);

	return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
void *ret;

if (__builtin_constant_p(size)) {
if (size > SLUB_MAX_SIZE)
return kmalloc_large(size, flags);
Expand All @@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
if (!s)
return ZERO_SIZE_PTR;

ret = kmem_cache_alloc_notrace(s, flags);

trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

return ret;
return kmem_cache_alloc_trace(s, flags, size);
}
}
return __kmalloc(size, flags);
Expand All @@ -266,36 +270,29 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node);
int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node)
int node, size_t size)
{
return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

/*
 * kmalloc_node - kmalloc with NUMA node preference.
 *
 * @size:  requested allocation size in bytes
 * @flags: gfp allocation flags
 * @node:  preferred NUMA node
 *
 * For compile-time-constant sizes that fit a kmalloc slab cache (and are
 * not DMA allocations), resolve the cache inline and allocate through the
 * tracing variant, which records the requested @size at the tracepoint.
 * ZERO_SIZE_PTR is returned for zero-byte requests (kmalloc_slab yields
 * no cache).  Everything else falls through to the out-of-line
 * __kmalloc_node().
 *
 * Merged-diff residue removed: a dead `void *ret;` and an unreachable
 * second return from the pre-image body.
 */
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(s, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
Expand Down
30 changes: 23 additions & 7 deletions trunk/mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@
#include <linux/math64.h>
#include <linux/fault-inject.h>

#include <trace/events/kmem.h>

/*
* Lock order:
* 1. slab_lock(page)
Expand Down Expand Up @@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_TRACING
/*
 * Out-of-line tracing allocators (CONFIG_TRACING).
 *
 * kmem_cache_alloc_trace(): slab allocation plus a kmalloc tracepoint that
 * records both the caller-requested @size and the cache object size.
 *
 * kmalloc_order_trace(): page-allocator-backed large kmalloc plus the same
 * tracepoint with PAGE_SIZE << order as the allocated size.
 *
 * The merged diff left a stray pre-image line (`return slab_alloc(s, ...)`)
 * inside kmalloc_order_trace referencing identifiers not in scope there,
 * plus the old _notrace signature/EXPORT names; only the post-image bodies
 * are kept.
 */
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);

void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_NUMA
Expand All @@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
EXPORT_SYMBOL(kmem_cache_alloc_node);

#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node)
int node, size_t size)
{
return slab_alloc(s, gfpflags, node, _RET_IP_);
void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);

trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif
#endif

Expand Down

0 comments on commit ad25efa

Please sign in to comment.