
Commit

yaml
---
r: 373725
b: refs/heads/master
c: 95a05b4
h: refs/heads/master
i:
  373723: d13a8d1
v: v3
Christoph Lameter authored and Pekka Enberg committed Feb 1, 2013
1 parent c61a1bf commit 5b09c92
Showing 4 changed files with 39 additions and 38 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 6a67368c36e2c0c2578ba62f6264ab739af08cce
refs/heads/master: 95a05b428cc675694321c8f762591984f3fd2b1e
34 changes: 24 additions & 10 deletions trunk/include/linux/slab.h
@@ -163,7 +163,12 @@ struct kmem_cache {
#else /* CONFIG_SLOB */

/*
* The largest kmalloc size supported by the slab allocators is
* Kmalloc array related definitions
*/

#ifdef CONFIG_SLAB
/*
* The largest kmalloc size supported by the SLAB allocators is
* 32 megabyte (2^25) or the maximum allocatable page order if that is
* less than 32 MB.
*
@@ -173,25 +178,34 @@ struct kmem_cache {
*/
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#define KMALLOC_SHIFT_LOW 5
#else
/*
* SLUB allocates up to order 2 pages directly and otherwise
* passes the request to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#define KMALLOC_SHIFT_LOW 3
#endif

#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
* Kmalloc subsystem.
*/
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#else
#ifdef CONFIG_SLAB
#define KMALLOC_MIN_SIZE 32
#else
#define KMALLOC_MIN_SIZE 8
#endif
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
* Figure out which kmalloc slab an allocation of a certain size
* belongs to.
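The net effect of the slab.h change is that both allocators now describe their kmalloc limits with the same set of shifts. As a quick sanity check, the following is a standalone user-space sketch (not part of the patch) that evaluates the SLUB branch of the new constants under an assumed PAGE_SHIFT of 12 and MAX_ORDER of 11; the macro names mirror the patch, the configuration values are assumptions.

/* Standalone sketch, not kernel code: evaluates the SLUB branch of the
 * new kmalloc constants for an assumed PAGE_SHIFT=12, MAX_ORDER=11 config. */
#include <stdio.h>

#define PAGE_SHIFT 12              /* assumption: 4 KiB pages */
#define MAX_ORDER  11              /* assumption: default buddy allocator limit */

/* SLUB branch from the patch */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX  (MAX_ORDER + PAGE_SHIFT)
#define KMALLOC_SHIFT_LOW  3

#define KMALLOC_MAX_SIZE       (1UL << KMALLOC_SHIFT_MAX)   /* largest kmalloc() */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)  /* largest slab-backed kmalloc() */
#define KMALLOC_MAX_ORDER      (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
#define KMALLOC_MIN_SIZE       (1 << KMALLOC_SHIFT_LOW)     /* no ARCH_DMA_MINALIGN assumed */

int main(void)
{
	printf("min size        : %d\n",  KMALLOC_MIN_SIZE);       /* 8 */
	printf("max cached size : %lu\n", KMALLOC_MAX_CACHE_SIZE); /* 8192 */
	printf("max alloc size  : %lu\n", KMALLOC_MAX_SIZE);       /* 8388608 */
	printf("max order       : %d\n",  KMALLOC_MAX_ORDER);      /* 11 */
	return 0;
}

Under that configuration SLUB backs kmalloc() with slab caches up to 8 KiB and hands anything larger to the page allocator, up to an 8 MiB maximum.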
19 changes: 3 additions & 16 deletions trunk/include/linux/slub_def.h
@@ -115,19 +115,6 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};

/*
* Maximum kmalloc object size handled by SLUB. Larger object allocations
* are passed through to the page allocator. The page allocator "fastpath"
* is relatively slow so we need this value sufficiently high so that
* performance critical objects are allocated through the SLUB fastpath.
*
* This should be dropped to PAGE_SIZE / 2 once the page allocator
* "fastpath" becomes competitive with the slab allocator fastpaths.
*/
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
@@ -139,7 +126,7 @@ struct kmem_cache {
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

/*
* Find the slab cache for a given combination of allocation flags and size.
@@ -211,7 +198,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
if (size > SLUB_MAX_SIZE)
if (size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large(size, flags);

if (!(flags & SLUB_DMA)) {
@@ -247,7 +234,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) &&
size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);

if (!s)
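The slub_def.h hunks only swap the size threshold used by the inline fast path from SLUB_MAX_SIZE to the common KMALLOC_MAX_CACHE_SIZE; the dispatch logic itself is unchanged. A rough user-space illustration of that pattern, with hypothetical helpers large_alloc() and cache_alloc() standing in for kmalloc_large() and the slab cache lookup, and the 8 KiB cutoff assumed from the sketch above:

#include <stdio.h>
#include <stdlib.h>

#define KMALLOC_MAX_CACHE_SIZE 8192UL   /* assumed SLUB cutoff, see sketch above */

static void *large_alloc(size_t size)   /* stand-in for kmalloc_large() */
{
	printf("page-allocator path for %zu bytes\n", size);
	return malloc(size);
}

static void *cache_alloc(size_t size)   /* stand-in for a kmalloc slab cache */
{
	printf("slab-cache path for %zu bytes\n", size);
	return malloc(size);
}

static inline void *demo_kmalloc(size_t size)
{
	/* In the kernel this comparison sits behind __builtin_constant_p(size),
	 * so for constant sizes the compiler picks one branch at compile time. */
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return large_alloc(size);
	return cache_alloc(size);
}

int main(void)
{
	free(demo_kmalloc(64));      /* stays on the slab-cache path */
	free(demo_kmalloc(65536));   /* exceeds the cutoff, page-allocator path */
	return 0;
}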
22 changes: 11 additions & 11 deletions trunk/mm/slub.c
@@ -2775,7 +2775,7 @@ init_kmem_cache_node(struct kmem_cache_node *n)
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));

/*
* Must align to double word boundary for the double cmpxchg
@@ -3174,11 +3174,11 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
* Kmalloc subsystem
*******************************************************************/

struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
static struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

static int __init setup_slub_min_order(char *str)
@@ -3280,7 +3280,7 @@ void *__kmalloc(size_t size, gfp_t flags)
struct kmem_cache *s;
void *ret;

if (unlikely(size > SLUB_MAX_SIZE))
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
return kmalloc_large(size, flags);

s = get_slab(size, flags);
@@ -3316,7 +3316,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
struct kmem_cache *s;
void *ret;

if (unlikely(size > SLUB_MAX_SIZE)) {
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
ret = kmalloc_large_node(size, flags, node);

trace_kmalloc_node(_RET_IP_, ret,
@@ -3721,7 +3721,7 @@ void __init kmem_cache_init(void)
caches++;
}

for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
caches++;
}
@@ -3739,7 +3739,7 @@ void __init kmem_cache_init(void)
BUG_ON(!kmalloc_caches[2]->name);
}

for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);

BUG_ON(!s);
@@ -3751,7 +3751,7 @@ void __init kmem_cache_init(void)
#endif

#ifdef CONFIG_ZONE_DMA
for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
struct kmem_cache *s = kmalloc_caches[i];

if (s && s->size) {
@@ -3930,7 +3930,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
struct kmem_cache *s;
void *ret;

if (unlikely(size > SLUB_MAX_SIZE))
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
return kmalloc_large(size, gfpflags);

s = get_slab(size, gfpflags);
@@ -3953,7 +3953,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
struct kmem_cache *s;
void *ret;

if (unlikely(size > SLUB_MAX_SIZE)) {
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
ret = kmalloc_large_node(size, gfpflags, node);

trace_kmalloc_node(caller, ret,
@@ -4312,7 +4312,7 @@ static void resiliency_test(void)
{
u8 *p;

BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);

printk(KERN_ERR "SLUB resiliency testing\n");
printk(KERN_ERR "-----------------------\n");
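In slub.c the cache-creation loops change from the exclusive bound i < SLUB_PAGE_SHIFT to the inclusive i <= KMALLOC_SHIFT_HIGH, which is why the kmalloc_caches arrays are now declared with KMALLOC_SHIFT_HIGH + 1 entries. A small sketch of what those loops enumerate, assuming the same PAGE_SHIFT 12 SLUB configuration as above (KMALLOC_SHIFT_LOW = 3, KMALLOC_SHIFT_HIGH = 13):

#include <stdio.h>

#define KMALLOC_SHIFT_LOW  3    /* assumption: SLUB minimum, 8-byte objects */
#define KMALLOC_SHIFT_HIGH 13   /* assumption: PAGE_SHIFT(12) + 1 */

int main(void)
{
	int i;

	/* Indexes 0..KMALLOC_SHIFT_HIGH must exist, hence the +1 array bound;
	 * the kernel additionally fills slots 1 and 2 with kmalloc-96 and
	 * kmalloc-192, which this sketch omits. */
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		printf("kmalloc-%d\n", 1 << i);   /* kmalloc-8 ... kmalloc-8192 */
	return 0;
}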
