Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 54522
b: refs/heads/master
c: b46b8f1
h: refs/heads/master
v: v3
  • Loading branch information
David Woodhouse authored and Linus Torvalds committed May 8, 2007
1 parent fd5b9de commit 3438b63
Show file tree
Hide file tree
Showing 3 changed files with 26 additions and 22 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 5b94f675f57e4ff16c8fda09088d7480a84dcd91
refs/heads/master: b46b8f19c9cd435ecac4d9d12b39d78c137ecd66
4 changes: 2 additions & 2 deletions trunk/include/linux/poison.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
/*
 * 64-bit red-zone magic values (diff rendering had left the stale 32-bit
 * pair in place alongside these; only the 64-bit pair is current).
 * Stored in the word before and the word after each red-zoned object.
 */
#define RED_INACTIVE	0x09F911029D74E35BULL	/* when obj is inactive */
#define RED_ACTIVE	0xD84156C5635688C0ULL	/* when obj is active */

#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc
Expand Down
42 changes: 23 additions & 19 deletions trunk/mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -148,10 +148,11 @@
* Usually, the kmalloc caches are cache_line_size() aligned, except when
* DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
* ARCH_KMALLOC_MINALIGN allows that.
* Note that increasing this value may disable some debug features.
*/
/* Minimum kmalloc alignment: at least that of a 64-bit integer, so the
 * 64-bit red-zone words can be accessed safely. */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
Expand Down Expand Up @@ -536,19 +537,22 @@ static int obj_size(struct kmem_cache *cachep)
return cachep->obj_size;
}

static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
return (unsigned long long*) (objp + obj_offset(cachep) -
sizeof(unsigned long long));
}

static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
return (unsigned long *)(objp + cachep->buffer_size -
2 * BYTES_PER_WORD);
return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
return (unsigned long long *)(objp + cachep->buffer_size -
sizeof(unsigned long long) -
BYTES_PER_WORD);
return (unsigned long long *) (objp + cachep->buffer_size -
sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
Expand All @@ -561,8 +565,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)

#define obj_offset(x) 0
#define obj_size(cachep) (cachep->buffer_size)
/* !DEBUG stubs: calling these is a bug; types match the 64-bit red-zone
 * accessors so callers compile identically in both configurations. */
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})

#endif
Expand Down Expand Up @@ -1776,7 +1780,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
char *realobj;

if (cachep->flags & SLAB_RED_ZONE) {
printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
*dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
Expand Down Expand Up @@ -2239,7 +2243,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* is greater than BYTES_PER_WORD.
*/
if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
ralign = BYTES_PER_WORD;
ralign = __alignof__(unsigned long long);

/* 2) arch mandated alignment */
if (ralign < ARCH_SLAB_MINALIGN) {
Expand All @@ -2250,7 +2254,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
ralign = align;
}
/* disable debug if necessary */
if (ralign > BYTES_PER_WORD)
if (ralign > __alignof__(unsigned long long))
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
/*
* 4) Store it.
Expand All @@ -2271,8 +2275,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
*/
if (flags & SLAB_RED_ZONE) {
/* add space for red zone words */
cachep->obj_offset += BYTES_PER_WORD;
size += 2 * BYTES_PER_WORD;
cachep->obj_offset += sizeof(unsigned long long);
size += 2 * sizeof(unsigned long long);
}
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
Expand Down Expand Up @@ -2833,7 +2837,7 @@ static void kfree_debugcheck(const void *objp)

static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
unsigned long redzone1, redzone2;
unsigned long long redzone1, redzone2;

redzone1 = *dbg_redzone1(cache, obj);
redzone2 = *dbg_redzone2(cache, obj);
Expand All @@ -2849,7 +2853,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
else
slab_error(cache, "memory outside object was overwritten");

printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
obj, redzone1, redzone2);
}

Expand Down Expand Up @@ -3065,7 +3069,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
printk(KERN_ERR
"%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
Expand Down

0 comments on commit 3438b63

Please sign in to comment.