Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (27 commits)
  SLUB: Fix memory hotplug with !NUMA
  slub: Move functions to reduce #ifdefs
  slub: Enable sysfs support for !CONFIG_SLUB_DEBUG
  SLUB: Optimize slab_free() debug check
  slub: Move NUMA-related functions under CONFIG_NUMA
  slub: Add lock release annotation
  slub: Fix signedness warnings
  slub: extract common code to remove objects from partial list without locking
  SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
  slub: reduce differences between SMP and NUMA
  Revert "Slub: UP bandaid"
  percpu: clear memory allocated with the km allocator
  percpu: use percpu allocator on UP too
  percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
  vmalloc: pcpu_get/free_vm_areas() aren't needed on UP
  SLUB: Fix merged slab cache names
  Slub: UP bandaid
  slub: fix SLUB_RESILIENCY_TEST for dynamic kmalloc caches
  slub: Fix up missing kmalloc_cache -> kmem_cache_node case for memoryhotplug
  slub: Add dummy functions for the !SLUB_DEBUG case
  ...
Linus Torvalds committed Oct 24, 2010
2 parents 1765a1f + 6d4121f commit 76c39e4
Showing 4 changed files with 424 additions and 384 deletions.
14 changes: 4 additions & 10 deletions include/linux/slub_def.h
@@ -87,7 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
 
@@ -96,11 +96,8 @@ struct kmem_cache {
 	 * Defragmentation by allocating from a remote node.
 	 */
 	int remote_node_defrag_ratio;
-	struct kmem_cache_node *node[MAX_NUMNODES];
-#else
-	/* Avoid an extra cache line for UP */
-	struct kmem_cache_node local_node;
 #endif
+	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
 /*
@@ -139,19 +136,16 @@ struct kmem_cache {
 
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
-/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
-#define KMALLOC_CACHES SLUB_PAGE_SHIFT
 #endif
 
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
+extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;
 
-	return &kmalloc_caches[index];
+	return kmalloc_caches[index];
 }
 
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
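
The slub_def.h hunks above reflect the switch from a statically sized array of struct kmem_cache objects to an array of pointers that the allocator fills in at boot, which is why kmalloc_slab() now returns the stored pointer instead of the address of an array slot, and why the extra KMALLOC_CACHES slots once reserved for DMA caches disappear. A minimal user-space sketch of the indexing difference, using placeholder types and array sizes rather than the kernel's real definitions:

/*
 * Illustrative only: stand-ins for the old and new kmalloc_caches tables,
 * showing why the '&' disappears from kmalloc_slab()'s return statement.
 */
#include <stdio.h>

struct kmem_cache { const char *name; };

#define NCACHES 12	/* placeholder for SLUB_PAGE_SHIFT */

/* Old layout: the cache objects themselves live in the array. */
static struct kmem_cache old_caches[NCACHES];

/* New layout: the array holds pointers filled in at boot. */
static struct kmem_cache *new_caches[NCACHES];

static struct kmem_cache *old_lookup(unsigned int index)
{
	return &old_caches[index];	/* address of the embedded object */
}

static struct kmem_cache *new_lookup(unsigned int index)
{
	return new_caches[index];	/* the stored pointer itself */
}

int main(void)
{
	static struct kmem_cache boot_cache = { .name = "kmalloc-64" };

	old_caches[6].name = "kmalloc-64";
	new_caches[6] = &boot_cache;

	printf("%s / %s\n", old_lookup(6)->name, new_lookup(6)->name);
	return 0;
}

In the kernel the pointers are set up during slab bootstrap; the sketch stands in only for the lookup, not for that initialization.
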
2 changes: 1 addition & 1 deletion lib/Kconfig.debug
@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
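
With sysfs support in SLUB no longer tied to CONFIG_SLUB_DEBUG, the statistics option only needs SYSFS itself. When SLUB_STATS is enabled, per-cache counters are exposed under /sys/kernel/slab/; the following sketch reads one of them, assuming a kmalloc-64 cache and an alloc_fastpath counter exist on the running kernel (both names are examples, not something this series guarantees):

/* Illustrative only: read a single SLUB statistics counter from sysfs. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/kmalloc-64/alloc_fastpath";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* e.g. CONFIG_SLUB_STATS not enabled */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
	return 0;
}
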
4 changes: 3 additions & 1 deletion mm/slob.c
@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		unsigned int order = get_order(size);
 
-		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
+		if (likely(order))
+			gfp |= __GFP_COMP;
+		ret = slob_new_pages(gfp, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
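
The mm/slob.c hunk sets __GFP_COMP, which marks a multi-page compound allocation, only when the computed order is non-zero, and it stops calling get_order(size) a second time for the same value. A small user-space sketch of that flag selection, using stand-in constants and a simplified get_order() rather than the kernel's:

/* Illustrative only: how the order decides whether __GFP_COMP is added. */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define GFP_KERNEL	0x01u	/* placeholder flag values */
#define __GFP_COMP	0x02u

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	size_t sizes[] = { 4000, 5000, 70000 };

	for (int i = 0; i < 3; i++) {
		unsigned int order = get_order(sizes[i]);
		unsigned int gfp = GFP_KERNEL;

		if (order)	/* only compound (order > 0) pages need __GFP_COMP */
			gfp |= __GFP_COMP;
		printf("size=%zu order=%u gfp=%#x\n", sizes[i], order, gfp);
	}
	return 0;
}
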
(diff for the fourth changed file not loaded)
