---
r: 85511
b: refs/heads/master
c: 71c7a06
h: refs/heads/master
i:
  85509: a151297
  85507: 3c09f4c
  85503: a370114
v: v3
Christoph Lameter authored and Christoph Lameter committed Feb 14, 2008
1 parent d23ea0e commit 95d607b
Showing 2 changed files with 39 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
- refs/heads/master: b7a49f0d4c34166ae84089d9f145cfaae1b0eec5
+ refs/heads/master: 71c7a06ff0a2ba0434ace4d7aa679537c4211d9d
43 changes: 38 additions & 5 deletions trunk/mm/slub.c
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
+ #define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
+ #define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */

/* Not all arches define cache_line_size */
#ifndef cache_line_size
@@ -1539,7 +1541,6 @@ static void *__slab_alloc(struct kmem_cache *s,
unlock_out:
slab_unlock(c->page);
stat(c, ALLOC_SLOWPATH);
- out:
#ifdef SLUB_FASTPATH
local_irq_restore(flags);
#endif
@@ -1574,8 +1575,24 @@ static void *__slab_alloc(struct kmem_cache *s,
c->page = new;
goto load_freelist;
}
- object = NULL;
- goto out;
+ #ifdef SLUB_FASTPATH
+ local_irq_restore(flags);
+ #endif
+ /*
+ * No memory available.
+ *
+ * If the slab uses higher order allocs but the object is
+ * smaller than a page size then we can fall back in emergencies
+ * to the page allocator via kmalloc_large. The page allocator may
+ * have failed to obtain a higher order page and we can try to
+ * allocate a single page if the object fits into one.
+ * That is only possible if certain conditions are met that are being
+ * checked when a slab is created.
+ */
+ if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+ return kmalloc_large(s->objsize, gfpflags);
+
+ return NULL;
debug:
object = c->page->freelist;
if (!alloc_debug_processing(s, c->page, object, addr))
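The comment in the hunk above describes the new last-resort path in __slab_alloc(): when no new slab page can be obtained, a cache created with __PAGE_ALLOC_FALLBACK may hand the request to kmalloc_large() instead of returning NULL, unless the caller passed __GFP_NORETRY. The user-space sketch below only illustrates that gate; the demo_cache struct, the DEMO_* flag values and the want_page_alloc_fallback() helper are stand-ins invented for the example, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag bits; the values only need to be distinct for the demo. */
#define DEMO_GFP_NORETRY         0x00001000u
#define DEMO_PAGE_ALLOC_FALLBACK 0x10000000u

struct demo_cache {
	unsigned int flags;	/* cache creation flags */
};

/* Mirrors the condition added at the end of __slab_alloc() above. */
static bool want_page_alloc_fallback(const struct demo_cache *s,
				     unsigned int gfpflags)
{
	return !(gfpflags & DEMO_GFP_NORETRY) &&
	       (s->flags & DEMO_PAGE_ALLOC_FALLBACK);
}

int main(void)
{
	struct demo_cache kmalloc_like = { .flags = DEMO_PAGE_ALLOC_FALLBACK };
	struct demo_cache ordinary = { .flags = 0 };

	/* 1: eligible cache, caller allows retrying -> fall back */
	printf("%d\n", want_page_alloc_fallback(&kmalloc_like, 0));
	/* 0: caller passed NORETRY -> fail fast instead */
	printf("%d\n", want_page_alloc_fallback(&kmalloc_like, DEMO_GFP_NORETRY));
	/* 0: cache was not created with the fallback flag */
	printf("%d\n", want_page_alloc_fallback(&ordinary, 0));
	return 0;
}

When the fallback fires, the returned object comes from the page allocator rather than from a slab, which fits the __KMALLOC_CACHE comment above ("objects freed using kfree") and, presumably, the new slab_unmergeable() check further down that keeps such caches from being merged.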
@@ -2322,7 +2339,20 @@ static int calculate_sizes(struct kmem_cache *s)
size = ALIGN(size, align);
s->size = size;

- s->order = calculate_order(size);
+ if ((flags & __KMALLOC_CACHE) &&
+ PAGE_SIZE / size < slub_min_objects) {
+ /*
+ * Kmalloc cache that would not have enough objects in
+ * an order 0 page. Kmalloc slabs can fall back to
+ * page allocator order 0 allocs so take a reasonably large
+ * order that will allow us a good number of objects.
+ */
+ s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+ s->flags |= __PAGE_ALLOC_FALLBACK;
+ s->allocflags |= __GFP_NOWARN;
+ } else
+ s->order = calculate_order(size);
+
if (s->order < 0)
return 0;
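The hunk above changes how calculate_sizes() picks s->order for kmalloc caches: if an order-0 page would hold fewer than slub_min_objects objects, the cache takes a large order up front and relies on the __PAGE_ALLOC_FALLBACK path when such pages are unavailable. Below is a small sketch of that arithmetic; the page size, slub_min_objects and slub_max_order values are illustrative examples rather than the kernel's defaults, while PAGE_ALLOC_COSTLY_ORDER is 3 in the kernel.

#include <stdio.h>

#define DEMO_PAGE_SIZE               4096u	/* example: 4 KiB pages */
#define DEMO_PAGE_ALLOC_COSTLY_ORDER 3		/* kernel value */

/*
 * Returns the slab order the special case above would pick, or -1 when
 * the normal calculate_order() path applies. "kmalloc_cache" models the
 * __KMALLOC_CACHE flag test.
 */
static int demo_pick_order(unsigned int object_size,
			   unsigned int slub_min_objects,
			   int slub_max_order,
			   int kmalloc_cache)
{
	if (kmalloc_cache && DEMO_PAGE_SIZE / object_size < slub_min_objects)
		return slub_max_order > DEMO_PAGE_ALLOC_COSTLY_ORDER ?
		       slub_max_order : DEMO_PAGE_ALLOC_COSTLY_ORDER;
	return -1;	/* not the special case */
}

int main(void)
{
	/* 4096 / 2048 = 2 objects, fewer than 4 wanted -> special case, order 3 */
	printf("kmalloc-2048: order %d\n", demo_pick_order(2048, 4, 1, 1));
	/* 4096 / 256 = 16 objects, enough -> normal calculate_order() path */
	printf("kmalloc-256:  order %d\n", demo_pick_order(256, 4, 1, 1));
	return 0;
}

With these numbers kmalloc-2048 gets a 32 KiB slab holding 16 objects; the added __GFP_NOWARN in s->allocflags presumably keeps failed high-order attempts quiet, since the order-0 fallback makes such failures recoverable.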

@@ -2539,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,

down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
- flags, NULL))
+ flags | __KMALLOC_CACHE, NULL))
goto panic;

list_add(&s->list, &slab_caches);
@@ -3058,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;

+ if (s->flags & __PAGE_ALLOC_FALLBACK)
+ return 1;
+
if (s->ctor)
return 1;
