Skip to content

Commit

Permalink
```yaml
---
r: 58202
b: refs/heads/master
c: 87a927c
h: refs/heads/master
v: v3
---
```
  • Loading branch information
David Woodhouse authored and Linus Torvalds committed Jul 5, 2007
1 parent f365172 commit edadf01
Show file tree
Hide file tree
Showing 2 changed files with 24 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 2bcb1b7de9eeea969a25d5f2b4511195cca9f2a2
refs/heads/master: 87a927c715789853cc8331d76039a2fd657a832a
32 changes: 23 additions & 9 deletions trunk/mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,7 @@

/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *)
#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
Expand Down Expand Up @@ -547,7 +548,7 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
if (cachep->flags & SLAB_STORE_USER)
return (unsigned long long *)(objp + cachep->buffer_size -
sizeof(unsigned long long) -
BYTES_PER_WORD);
REDZONE_ALIGN);
return (unsigned long long *) (objp + cachep->buffer_size -
sizeof(unsigned long long));
}
Expand Down Expand Up @@ -2178,7 +2179,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2 * sizeof(unsigned long long)))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
Expand Down Expand Up @@ -2219,12 +2221,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}

/*
* Redzoning and user store require word alignment. Note this will be
* overridden by architecture or caller mandated alignment if either
* is greater than BYTES_PER_WORD.
* Redzoning and user store require word alignment or possibly larger.
* Note this will be overridden by architecture or caller mandated
* alignment if either is greater than BYTES_PER_WORD.
*/
if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
ralign = __alignof__(unsigned long long);
if (flags & SLAB_STORE_USER)
ralign = BYTES_PER_WORD;

if (flags & SLAB_RED_ZONE) {
ralign = REDZONE_ALIGN;
/* If redzoning, ensure that the second redzone is suitably
* aligned, by adjusting the object size accordingly. */
size += REDZONE_ALIGN - 1;
size &= ~(REDZONE_ALIGN - 1);
}

/* 2) arch mandated alignment */
if (ralign < ARCH_SLAB_MINALIGN) {
Expand Down Expand Up @@ -2261,9 +2271,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
* the real object.
* the real object. But if the second red zone needs to be
* aligned to 64 bits, we must allow that much space.
*/
size += BYTES_PER_WORD;
if (flags & SLAB_RED_ZONE)
size += REDZONE_ALIGN;
else
size += BYTES_PER_WORD;
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
Expand Down

0 comments on commit edadf01

Please sign in to comment.