Commit a42e1ab

---
r: 151442
b: refs/heads/master
c: 8eae985
h: refs/heads/master
v: v3
Pekka Enberg authored and Vegard Nossum committed Jun 13, 2009
1 parent a1d947c commit a42e1ab
Showing 3 changed files with 82 additions and 82 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b618ad31bb2020db6a36929122e5554e33210d47
+refs/heads/master: 8eae985f08138758e06503588f5f1196269bc415
81 changes: 81 additions & 0 deletions trunk/include/linux/slab_def.h
@@ -16,6 +16,87 @@
#include <linux/compiler.h>
#include <linux/kmemtrace.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
        struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int buffer_size;
        u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 4) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t gfpflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;
        unsigned int dflags;            /* dynamic flags */

        /* constructor func */
        void (*ctor)(void *obj);

/* 5) cache creation/removal */
        const char *name;
        struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. buffer_size contains the total
         * object size including these internal fields, the following two
         * variables contain the offset to the user object and its size.
         */
        int obj_offset;
        int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

        /*
         * We put nodelists[] at the end of kmem_cache, because we want to size
         * this array to nr_node_ids slots instead of MAX_NUMNODES
         * (see kmem_cache_init())
         * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of nodes.
         */
        struct kmem_list3 *nodelists[MAX_NUMNODES];
        /*
         * Do not add fields after nodelists[]
         */
};

/* Size description struct for general caches. */
struct cache_sizes {
        size_t cs_size;
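The reciprocal_buffer_size field in the kmem_cache definition above is a small optimization: it caches a precomputed multiplicative inverse of buffer_size so the allocator can turn a byte offset within a slab into an object index with a multiply and a shift instead of an integer division, much as mm/slab.c's obj_to_index() helper does. Below is a minimal user-space sketch of the trick; reciprocal_value() and reciprocal_divide() mirror the kernel's linux/reciprocal_div.h helpers of this era, and the 192-byte object size is an invented example, not a value from this commit.

#include <stdint.h>
#include <stdio.h>

/* Precompute ceil(2^32 / k), as the 2009-era reciprocal_value() does. */
static uint32_t reciprocal_value(uint32_t k)
{
        uint64_t val = (1ULL << 32) + (k - 1);
        return (uint32_t)(val / k);
}

/* Compute a / k with one 32x32->64 multiply and a shift; no division. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
        return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
        uint32_t buffer_size = 192;             /* invented object size */
        uint32_t reciprocal_buffer_size = reciprocal_value(buffer_size);
        uint32_t offset = 5 * buffer_size;      /* start of the 6th object */

        /* Prints "object index = 5". */
        printf("object index = %u\n",
               reciprocal_divide(offset, reciprocal_buffer_size));
        return 0;
}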
81 changes: 0 additions & 81 deletions trunk/mm/slab.c
@@ -374,87 +374,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
        MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
        } while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
        struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;

        unsigned int buffer_size;
        u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */

/* 4) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t gfpflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;
        unsigned int dflags;            /* dynamic flags */

        /* constructor func */
        void (*ctor)(void *obj);

/* 5) cache creation/removal */
        const char *name;
        struct list_head next;

/* 6) statistics */
#if STATS
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;
#endif
#if DEBUG
        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. buffer_size contains the total
         * object size including these internal fields, the following two
         * variables contain the offset to the user object and its size.
         */
        int obj_offset;
        int obj_size;
#endif
        /*
         * We put nodelists[] at the end of kmem_cache, because we want to size
         * this array to nr_node_ids slots instead of MAX_NUMNODES
         * (see kmem_cache_init())
         * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of nodes.
         */
        struct kmem_list3 *nodelists[MAX_NUMNODES];
        /*
         * Do not add fields after nodelists[]
         */
};

#define CFLGS_OFF_SLAB (0x80000000UL)
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)

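The nodelists[] comment carried over verbatim in this move documents a sizing trick that is easy to miss: the array must be the last member so that kmem_cache_init() can allocate dynamically created caches with only nr_node_ids pointer slots, while the statically defined cache_cache still reserves the MAX_NUMNODES worst case. Here is a hedged user-space analogue of the idea; the struct is trimmed down and the node counts are invented, so this is an illustration, not code from the commit.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 64                 /* illustrative compile-time maximum */

struct kmem_list3;                      /* per-node slab lists; opaque here */

/* Trimmed stand-in for struct kmem_cache; nodelists[] must stay last,
 * exactly as the "Do not add fields after nodelists[]" comment demands. */
struct kmem_cache {
        unsigned int num;
        struct kmem_list3 *nodelists[MAX_NUMNODES];
};

int main(void)
{
        int nr_node_ids = 2;            /* nodes actually present at boot */

        /* A dynamically created cache only needs bytes up to the node
         * pointers it will ever touch, so it can be allocated short. */
        size_t sz = offsetof(struct kmem_cache, nodelists) +
                    nr_node_ids * sizeof(struct kmem_list3 *);

        struct kmem_cache *c = malloc(sz);
        printf("declared: %zu bytes, allocated: %zu bytes\n",
               sizeof(struct kmem_cache), sz);
        free(c);
        return 0;
}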
