
Commit

---
yaml
---
r: 54198
b: refs/heads/master
c: 81819f0
h: refs/heads/master
v: v3
Christoph Lameter authored and Linus Torvalds committed May 7, 2007
1 parent ec4d00f commit 4407155
Showing 10 changed files with 3,423 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 543691a6cd70b606dd9bed5e77b120c5d9c5c506
refs/heads/master: 81819f0fc8285a2a5a921c019e3e3d7b6169d225
4 changes: 4 additions & 0 deletions trunk/arch/frv/Kconfig
@@ -53,6 +53,10 @@ config ARCH_HAS_ILOG2_U64
bool
default y

config ARCH_USES_SLAB_PAGE_STRUCT
bool
default y

mainmenu "Fujitsu FR-V Kernel Configuration"

source "init/Kconfig"
4 changes: 4 additions & 0 deletions trunk/arch/i386/Kconfig
@@ -79,6 +79,10 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default y

config ARCH_USES_SLAB_PAGE_STRUCT
bool
default y

config DMI
bool
default y
17 changes: 15 additions & 2 deletions trunk/include/linux/mm_types.h
@@ -19,10 +19,16 @@ struct page {
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
atomic_t _count; /* Usage count, see below. */
atomic_t _mapcount; /* Count of ptes mapped in mms,
union {
atomic_t _mapcount; /* Count of ptes mapped in mms,
* to show when page is mapped
* & limit reverse map searches.
*/
struct { /* SLUB uses */
short unsigned int inuse;
short unsigned int offset;
};
};
union {
struct {
unsigned long private; /* Mapping-private opaque data:
@@ -43,8 +49,15 @@ struct page {
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
spinlock_t ptl;
#endif
struct { /* SLUB uses */
struct page *first_page; /* Compound pages */
struct kmem_cache *slab; /* Pointer to slab */
};
};
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* SLUB: pointer to free object */
};
pgoff_t index; /* Our offset within mapping. */
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
*/
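Note on the unions above: SLUB reuses struct page fields that have no meaning for slab pages, so _mapcount doubles as the inuse/offset pair and index doubles as the freelist pointer. A minimal sketch of how allocation code could consume these overloaded fields (sketch_alloc_from is a hypothetical helper, not code from this commit; it assumes each free object stores the next-free pointer at the cache's free pointer offset, and ignores locking):

	/* Illustrative only: pop one object off a slab page's freelist,
	 * using the struct page fields this commit overloads. */
	static void *sketch_alloc_from(struct kmem_cache *s, struct page *page)
	{
		void *object = page->freelist;

		if (object) {
			/* Each free object holds a pointer to the next
			 * free object at offset s->offset. */
			page->freelist = *(void **)(object + s->offset);
			page->inuse++;
		}
		return object;
	}
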
3 changes: 3 additions & 0 deletions trunk/include/linux/poison.h
@@ -18,6 +18,9 @@
#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */

#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc

/* ...and for poisoning */
#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
#define POISON_FREE 0x6b /* for use-after-free poisoning */
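These two values are fill patterns for SLUB's red zones, the guard bytes placed around each object: SLUB_RED_ACTIVE while the object is allocated, SLUB_RED_INACTIVE while it is free. Any other byte found there indicates an out-of-bounds write. A hedged sketch of the kind of check this enables (redzone_intact is an illustrative helper, not from this commit):

	/* Illustrative only: verify a red zone still holds the pattern
	 * it was painted with when the object was handed out. */
	static int redzone_intact(const unsigned char *zone, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			if (zone[i] != SLUB_RED_ACTIVE)
				return 0;	/* overwritten past the object */
		return 1;
	}
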
14 changes: 10 additions & 4 deletions trunk/include/linux/slab.h
@@ -32,6 +32,7 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */

/* Flags passed to constructor functions */
#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then destructor */
@@ -42,7 +43,7 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
* struct kmem_cache related prototypes
*/
void __init kmem_cache_init(void);
extern int slab_is_available(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
@@ -95,9 +96,14 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* the appropriate general cache at compile time.
*/

#ifdef CONFIG_SLAB
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#else
#include <linux/slab_def.h>
#endif /* !CONFIG_SLUB */
#else

/*
* Fallback definitions for an allocator not wanting to provide
* its own optimized kmalloc definitions (like SLOB).
@@ -184,7 +190,7 @@ static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
#ifdef CONFIG_DEBUG_SLAB
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, __builtin_return_address(0))
@@ -202,7 +208,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
* standard allocator where we care about the real place the memory
* allocation request comes from.
*/
#ifdef CONFIG_DEBUG_SLAB
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
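The two hunks above make the _track_caller variants available whenever SLUB is enabled, not only under CONFIG_DEBUG_SLAB. A short usage sketch (my_zeroed_alloc is a hypothetical wrapper, not from this commit):

	void *my_zeroed_alloc(size_t len)
	{
		/* The macro passes __builtin_return_address(0), i.e. the
		 * address my_zeroed_alloc() returns to, so allocator
		 * diagnostics blame our caller rather than this wrapper. */
		void *p = kmalloc_track_caller(len, GFP_KERNEL);

		if (p)
			memset(p, 0, len);
		return p;
	}
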
201 changes: 201 additions & 0 deletions trunk/include/linux/slub_def.h
@@ -0,0 +1,201 @@
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
* SLUB: A slab allocator without object queues.
*
* (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
*/
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
};

/*
* Slab cache management.
*/
struct kmem_cache {
/* Used for retrieving partial slabs etc. */
unsigned long flags;
int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
unsigned int order;

/*
* Avoid an extra cache line for UP, SMP and for the node local to
* struct kmem_cache.
*/
struct kmem_cache_node local_node;

/* Allocation and freeing of slabs */
int objects; /* Number of objects in slab */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(void *, struct kmem_cache *, unsigned long);
void (*dtor)(void *, struct kmem_cache *, unsigned long);
int inuse; /* Offset to metadata */
int align; /* Alignment */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
struct kobject kobj; /* For sysfs */

#ifdef CONFIG_NUMA
int defrag_ratio;
struct kmem_cache_node *node[MAX_NUMNODES];
#endif
struct page *cpu_slab[NR_CPUS];
};

/*
* Kmalloc subsystem.
*/
#define KMALLOC_SHIFT_LOW 3

#ifdef CONFIG_LARGE_ALLOCS
#define KMALLOC_SHIFT_HIGH 25
#else
#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
#define KMALLOC_SHIFT_HIGH 20
#else
#define KMALLOC_SHIFT_HIGH 18
#endif
#endif

/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

/*
* Sorry that the following has to be that ugly, but some versions of GCC
* have trouble with constant propagation and loops.
*/
static inline int kmalloc_index(int size)
{
if (size == 0)
return 0;
if (size > 64 && size <= 96)
return 1;
if (size > 128 && size <= 192)
return 2;
if (size <= 8) return 3;
if (size <= 16) return 4;
if (size <= 32) return 5;
if (size <= 64) return 6;
if (size <= 128) return 7;
if (size <= 256) return 8;
if (size <= 512) return 9;
if (size <= 1024) return 10;
if (size <= 2 * 1024) return 11;
if (size <= 4 * 1024) return 12;
if (size <= 8 * 1024) return 13;
if (size <= 16 * 1024) return 14;
if (size <= 32 * 1024) return 15;
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
#if KMALLOC_SHIFT_HIGH > 18
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
#endif
#if KMALLOC_SHIFT_HIGH > 20
if (size <= 2 * 1024 * 1024) return 21;
if (size <= 4 * 1024 * 1024) return 22;
if (size <= 8 * 1024 * 1024) return 23;
if (size <= 16 * 1024 * 1024) return 24;
if (size <= 32 * 1024 * 1024) return 25;
#endif
return -1;

/*
* What we really wanted to do and cannot do because of compiler issues is:
* int i;
* for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
* if (size <= (1 << i))
* return i;
*/
}

/*
* Find the slab cache for a given combination of allocation flags and size.
*
* This ought to end up with a global pointer to the right cache
* in kmalloc_caches.
*/
static inline struct kmem_cache *kmalloc_slab(size_t size)
{
int index = kmalloc_index(size);

if (index == 0)
return NULL;

if (index < 0) {
/*
* Generate a link failure. Would be great if we could
* do something to stop the compile here.
*/
extern void __kmalloc_size_too_large(void);
__kmalloc_size_too_large();
}
return &kmalloc_caches[index];
}

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA 0
#endif

static inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);

if (!s)
return NULL;

return kmem_cache_alloc(s, flags);
} else
return __kmalloc(size, flags);
}

static inline void *kzalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);

if (!s)
return NULL;

return kmem_cache_zalloc(s, flags);
} else
return __kzalloc(size, flags);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);

if (!s)
return NULL;

return kmem_cache_alloc_node(s, flags, node);
} else
return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */
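
The point of the __builtin_constant_p() branches above: when the requested size is a compile-time constant, kmalloc_index() folds to a constant, kmalloc_slab() reduces to indexing kmalloc_caches[], and the whole call compiles down to a direct kmem_cache_alloc(); only runtime-variable sizes go through __kmalloc(). A hypothetical caller to illustrate (struct foo and example() are not from this commit; the object size assumes a typical ABI):

	struct foo {
		int a, b;	/* sizeof(struct foo) == 8 on common ABIs */
	};

	void example(size_t n)
	{
		/* Constant size: kmalloc_index(8) == 3, so this becomes
		 * kmem_cache_alloc(&kmalloc_caches[3], GFP_KERNEL). */
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

		/* Runtime size: compiles to __kmalloc(n, GFP_KERNEL). */
		void *buf = kmalloc(n, GFP_KERNEL);

		kfree(buf);
		kfree(f);
	}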
53 changes: 40 additions & 13 deletions trunk/init/Kconfig
@@ -478,15 +478,6 @@ config SHMEM
option replaces shmem and tmpfs with the much simpler ramfs code,
which may be appropriate on small systems without swap.

config SLAB
default y
bool "Use full SLAB allocator" if (EMBEDDED && !SMP && !SPARSEMEM)
help
Disabling this replaces the advanced SLAB allocator and
kmalloc support with the drastically simpler SLOB allocator.
SLOB is more space efficient but does not scale well and is
more susceptible to fragmentation.

config VM_EVENT_COUNTERS
default y
bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
@@ -496,6 +487,46 @@ config VM_EVENT_COUNTERS
on EMBEDDED systems. /proc/vmstat will only show page counts
if VM event counters are disabled.

choice
prompt "Choose SLAB allocator"
default SLAB
help
This option allows you to select a slab allocator.

config SLAB
bool "SLAB"
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache-hot objects in
per-CPU and per-node queues. SLAB is the default choice of
slab allocator.

config SLUB
depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
bool "SLUB (Unqueued Allocator)"
help
SLUB is a slab allocator that minimizes cache line usage
instead of managing queues of cached objects (the SLAB approach).
Per-CPU caching is realized using slabs of objects instead
of queues of objects. SLUB can use memory more efficiently
and has enhanced diagnostics.

config SLOB
#
# SLOB cannot support SMP because SLAB_DESTROY_BY_RCU does not work
# properly.
#
depends on EMBEDDED && !SMP && !SPARSEMEM
bool "SLOB (Simple Allocator)"
help
SLOB replaces the SLAB allocator with a drastically simpler
allocator. SLOB is more space efficient than SLAB but does not
scale well (single lock for all operations) and is more susceptible
to fragmentation. SLOB is a great choice for reducing memory
usage and code size on embedded systems.

endchoice

endmenu # General setup

config RT_MUTEXES
@@ -511,10 +542,6 @@ config BASE_SMALL
default 0 if BASE_FULL
default 1 if !BASE_FULL

config SLOB
default !SLAB
bool

menu "Loadable module support"

config MODULES
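The choice block above makes the three allocators mutually exclusive at configuration time. Assuming a kernel configured after this commit, a .config selecting the experimental SLUB allocator would contain something like:

	CONFIG_EXPERIMENTAL=y
	# CONFIG_SLAB is not set
	CONFIG_SLUB=y
	# CONFIG_SLOB is not set
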
[Remaining file diffs not loaded]
