Commit 9629f0b

---
r: 633
b: refs/heads/master
c: 97e2bde
h: refs/heads/master
i:
  631: 581871a
v: v3
Manfred Spraul authored and Linus Torvalds committed May 1, 2005
1 parent e67156c commit 9629f0b
Showing 3 changed files with 47 additions and 23 deletions.
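In short, this commit reworks the NUMA slab interface: kmem_cache_alloc_node() now takes explicit gfp flags instead of hard-coding GFP_KERNEL, and a new kmalloc_node() helper performs node-local general-purpose allocations. The new declarations, as they appear in the slab.h hunk below, are:

    extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
    extern void *kmalloc_node(size_t size, int flags, int node);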
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: dd1d5afca8d3bda7ff9db773fc08e648d2503dc6
+refs/heads/master: 97e2bde47f886a317909c8a8f9bd2fcd8ce2f0b0
23 changes: 15 additions & 8 deletions trunk/include/linux/slab.h
@@ -62,16 +62,9 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, int);
-#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int node)
-{
-	return kmem_cache_alloc(cachep, GFP_KERNEL);
-}
-#endif
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
+extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -109,6 +102,20 @@ extern void *kcalloc(size_t, size_t, unsigned int __nocast);
 extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
+extern void *kmalloc_node(size_t size, int flags, int node);
+#else
+static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline void *kmalloc_node(size_t size, int flags, int node)
+{
+	return kmalloc(size, flags);
+}
+#endif
+
 extern int FASTCALL(kmem_cache_reap(int));
 extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
 
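As a usage note, here is a minimal sketch of how a caller might use the new node-aware interface. struct my_dev and my_dev_setup() are hypothetical names invented for illustration, and cpu_to_node() is assumed to come from the topology headers; on CONFIG_NUMA=n builds the inline stubs above make the call equivalent to a plain kmalloc().

    /* Hypothetical caller: allocate a per-CPU control structure on the
     * memory node that backs the given CPU. */
    #include <linux/slab.h>
    #include <linux/topology.h>	/* cpu_to_node() -- assumed location */

    struct my_dev {
    	int cpu;
    	void *buffer;
    };

    static struct my_dev *my_dev_setup(int cpu)
    {
    	struct my_dev *dev;

    	/* Falls back to kmalloc(size, flags) when CONFIG_NUMA is off. */
    	dev = kmalloc_node(sizeof(*dev), GFP_KERNEL, cpu_to_node(cpu));
    	if (!dev)
    		return NULL;
    	dev->cpu = cpu;
    	dev->buffer = NULL;
    	return dev;
    }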
45 changes: 31 additions & 14 deletions trunk/mm/slab.c
@@ -583,7 +583,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -607,6 +607,12 @@ static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
 	return csizep->cs_cachep;
 }
 
+kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+{
+	return __find_general_cachep(size, gfpflags);
+}
+EXPORT_SYMBOL(kmem_find_general_cachep);
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
 		int flags, size_t *left_over, unsigned int *num)
@@ -672,14 +678,11 @@ static struct array_cache *alloc_arraycache(int cpu, int entries,
 	int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	if (cpu != -1) {
-		kmem_cache_t *cachep;
-		cachep = kmem_find_general_cachep(memsize, GFP_KERNEL);
-		if (cachep)
-			nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu));
-	}
-	if (!nc)
+	if (cpu == -1)
 		nc = kmalloc(memsize, GFP_KERNEL);
+	else
+		nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu));
+
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -2361,7 +2364,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
  * and can sleep. And it will allocate memory on the given node, which
  * can improve the performance for cpu bound structures.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
 {
 	int loop;
 	void *objp;
@@ -2393,7 +2396,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 	spin_unlock_irq(&cachep->spinlock);
 
 	local_irq_disable();
-	if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+	if (!cache_grow(cachep, flags, nodeid)) {
 		local_irq_enable();
 		return NULL;
 	}
@@ -2435,6 +2438,16 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmalloc_node(size_t size, int flags, int node)
+{
+	kmem_cache_t *cachep;
+
+	cachep = kmem_find_general_cachep(size, flags);
+	if (unlikely(cachep == NULL))
+		return NULL;
+	return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
 #endif
 
 /**
@@ -2462,7 +2475,12 @@ void *__kmalloc(size_t size, unsigned int __nocast flags)
 {
 	kmem_cache_t *cachep;
 
-	cachep = kmem_find_general_cachep(size, flags);
+	/* If you want to save a few bytes .text space: replace
+	 * __ with kmem_.
+	 * Then kmalloc uses the uninlined functions instead of the inline
+	 * functions.
+	 */
+	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
 	return __cache_alloc(cachep, flags);
@@ -2489,9 +2507,8 @@ void *__alloc_percpu(size_t size, size_t align)
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_possible(i))
 			continue;
-		pdata->ptrs[i] = kmem_cache_alloc_node(
-					kmem_find_general_cachep(size, GFP_KERNEL),
-					cpu_to_node(i));
+		pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL,
+				cpu_to_node(i));
 
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
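The caller-visible effect of the slab.c changes is easiest to see side by side. This is a simplified restatement of the __alloc_percpu hunk above, not additional code from the commit; ptr, size, and i stand in for the surrounding context:

    /* Before: look up a general cache by hand; kmem_cache_alloc_node()
     * allocated with GFP_KERNEL unconditionally. */
    ptr = kmem_cache_alloc_node(kmem_find_general_cachep(size, GFP_KERNEL),
    			cpu_to_node(i));

    /* After: one call, with the gfp flags passed through explicitly
     * (they now reach cache_grow() when the node has no free objects). */
    ptr = kmalloc_node(size, GFP_KERNEL, cpu_to_node(i));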
