Commit 5257d4a
---
r: 56508
b: refs/heads/master
c: c59def9
h: refs/heads/master
v: v3
Christoph Lameter authored and Linus Torvalds committed May 17, 2007
1 parent 1b8a471 commit 5257d4a
Showing 5 changed files with 17 additions and 63 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: afc0cedbe9138e3e8b38bfa1e4dfd01a2c537d62
+refs/heads/master: c59def9f222d44bb7e2f0a559f2906191a0862d7
1 change: 0 additions & 1 deletion trunk/include/linux/slub_def.h
@@ -40,7 +40,6 @@ struct kmem_cache {
 	int objects;		/* Number of objects in slab */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
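The field removed above is the last trace of destructor support in SLUB's struct kmem_cache. Note that kmem_cache_create() keeps its six-argument prototype for now; callers simply must pass NULL for dtor, which the allocators below enforce with BUG()/BUG_ON(dtor). A minimal sketch of a caller against the post-patch API, for orientation; my_obj, my_ctor, and my_cache_init are hypothetical names, not part of this commit:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_obj {
	spinlock_t lock;
	struct list_head link;
};

static struct kmem_cache *my_cache;

/* Constructors survive this patch: they run once when a slab page is
 * populated with objects, not on every kmem_cache_alloc(). */
static void my_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	struct my_obj *p = obj;

	spin_lock_init(&p->lock);
	INIT_LIST_HEAD(&p->link);
}

static int my_cache_init(void)
{
	/* After this commit the trailing dtor argument must be NULL. */
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
				     SLAB_HWCACHE_ALIGN, my_ctor, NULL);
	return my_cache ? 0 : -ENOMEM;
}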
27 changes: 2 additions & 25 deletions trunk/mm/slab.c
@@ -409,9 +409,6 @@ struct kmem_cache {
 	/* constructor func */
 	void (*ctor) (void *, struct kmem_cache *, unsigned long);
 
-	/* de-constructor func */
-	void (*dtor) (void *, struct kmem_cache *, unsigned long);
-
 	/* 5) cache creation/removal */
 	const char *name;
 	struct list_head next;
@@ -1911,20 +1908,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 			slab_error(cachep, "end of a freed object "
 				   "was overwritten");
 		}
-		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
 }
 #else
 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-	if (cachep->dtor) {
-		int i;
-		for (i = 0; i < cachep->num; i++) {
-			void *objp = index_to_obj(cachep, slabp, i);
-			(cachep->dtor) (objp, cachep, 0);
-		}
-	}
 }
 #endif
 
@@ -2124,7 +2112,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep)
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @dtor: A destructor for the objects (not implemented anymore).
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a int, but can be interrupted.
@@ -2159,7 +2147,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || dtor) {
 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
 				name);
 		BUG();
@@ -2213,9 +2201,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_DESTROY_BY_RCU)
 		BUG_ON(flags & SLAB_POISON);
 #endif
-	if (flags & SLAB_DESTROY_BY_RCU)
-		BUG_ON(dtor);
-
 	/*
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
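The BUG_ON(dtor) deleted in the hunk above enforced that SLAB_DESTROY_BY_RCU caches never had destructors: under that flag a freed object's memory may be reallocated to a new object of the same cache while RCU readers still hold pointers to it, so there is no point at which a destructor could safely run. A hedged sketch of the read-side pattern such caches exist for; the types and hash_lookup() helper are illustrative, not from this patch:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t lock;
	unsigned long key;
};
struct my_table;
static struct my_obj *hash_lookup(struct my_table *t, unsigned long key);

/* Revalidating obj->key under the lock is the price of RCU-destroyed
 * slabs: memory stays type-stable but may be reused at any time.
 * On success the object is returned with obj->lock held. */
static struct my_obj *lookup_stable(struct my_table *t, unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = hash_lookup(t, key);	/* hypothetical RCU-safe lookup */
	if (obj) {
		spin_lock(&obj->lock);
		if (obj->key != key) {
			/* The slot was freed and reused; treat as a miss. */
			spin_unlock(&obj->lock);
			obj = NULL;
		}
	}
	rcu_read_unlock();
	return obj;
}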
@@ -2370,7 +2355,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG_ON(!cachep->slabp_cache);
 	}
 	cachep->ctor = ctor;
-	cachep->dtor = dtor;
 	cachep->name = name;
 
 	if (setup_cpu_cache(cachep)) {
@@ -2835,7 +2819,6 @@ static int cache_grow(struct kmem_cache *cachep,
  * Perform extra freeing checks:
  * - detect bad pointers.
  * - POISON/RED_ZONE checking
- * - destructor calls, for caches with POISON+dtor
  */
 static void kfree_debugcheck(const void *objp)
 {
@@ -2894,12 +2877,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-	if (cachep->flags & SLAB_POISON && cachep->dtor) {
-		/* we want to cache poison the object,
-		 * call the destruction callback
-		 */
-		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
-	}
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
 #endif
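The block removed from cache_free_debugcheck() existed only to sequence a destructor ahead of poisoning; with destructors gone, debug builds can poison freed objects unconditionally. For readers unfamiliar with poisoning, a simplified userspace illustration of the idea, not the kernel's code (0x6b and 0xa5 are the kernel's actual POISON_FREE/POISON_END bytes; debug_poison_object is a made-up helper):

#include <stddef.h>
#include <string.h>

#define POISON_FREE 0x6b	/* fills freed objects */
#define POISON_END  0xa5	/* marks the final byte */

/* Overwrite a freed object (size >= 1 assumed) with a recognizable
 * pattern so a use-after-free read shows up as 0x6b6b6b6b... */
static void debug_poison_object(void *objp, size_t size)
{
	memset(objp, POISON_FREE, size - 1);
	((unsigned char *)objp)[size - 1] = POISON_END;
}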
5 changes: 0 additions & 5 deletions trunk/mm/slob.c
@@ -280,7 +280,6 @@ struct kmem_cache {
 	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
 };
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -296,13 +295,11 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	c->name = name;
 	c->size = size;
 	if (flags & SLAB_DESTROY_BY_RCU) {
-		BUG_ON(dtor);
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
 	c->ctor = ctor;
-	c->dtor = dtor;
 	/* ignore alignment unless it's forced */
 	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 	if (c->align < align)
@@ -371,8 +368,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 		slob_rcu->size = c->size;
 		call_rcu(&slob_rcu->head, kmem_rcu_free);
 	} else {
-		if (c->dtor)
-			c->dtor(b, c, 0);
 		__kmem_cache_free(b, c->size);
 	}
 }
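SLOB's SLAB_DESTROY_BY_RCU handling, visible in the two hunks above, is untouched apart from the dropped BUG_ON(dtor): the cache inflates c->size to reserve a struct slob_rcu footer, and kmem_cache_free() fills that footer in and queues it with call_rcu() instead of freeing immediately. A sketch of the layout implied by the diff; the struct matches the fields used in the hunk, while the diagram is an editor's illustration:

#include <linux/rcupdate.h>

struct slob_rcu {
	struct rcu_head head;	/* links the object onto the RCU queue */
	int size;		/* recorded so kmem_rcu_free() knows what to free */
};

/*
 * RCU-freed SLOB object b, allocated with c->size already inflated:
 *
 *   b                       b + c->size - sizeof(struct slob_rcu)
 *   |<-- user-visible object -->|<------ struct slob_rcu ------>|
 *
 * kmem_cache_free() writes the footer in place and hands
 * &slob_rcu->head to call_rcu(); the memory is reclaimed only after
 * a grace period has elapsed.
 */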
45 changes: 14 additions & 31 deletions trunk/mm/slub.c
@@ -891,13 +891,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	 * On 32 bit platforms the limit is 256k. On 64bit platforms
 	 * the limit is 512k.
 	 *
-	 * Debugging or ctor/dtors may create a need to move the free
+	 * Debugging or ctor may create a need to move the free
 	 * pointer. Fail if this happens.
 	 */
 	if (s->size >= 65535 * sizeof(void *)) {
 		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
 				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(s->ctor || s->dtor);
+		BUG_ON(s->ctor);
 	}
 	else
 		/*
@@ -1030,15 +1030,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int pages = 1 << s->order;
 
-	if (unlikely(SlabDebug(page) || s->dtor)) {
+	if (unlikely(SlabDebug(page))) {
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page)) {
-			if (s->dtor)
-				s->dtor(p, s, 0);
+		for_each_object(p, s, page_address(page))
 			check_object(s, page, p, 0);
-		}
 	}
 
 	mod_zone_page_state(page_zone(page),
@@ -1871,7 +1868,7 @@ static int calculate_sizes(struct kmem_cache *s)
 	 * then we should never poison the object itself.
 	 */
 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
-			!s->ctor && !s->dtor)
+			!s->ctor)
 		s->flags |= __OBJECT_POISON;
 	else
 		s->flags &= ~__OBJECT_POISON;
@@ -1901,7 +1898,7 @@ static int calculate_sizes(struct kmem_cache *s)
 
 #ifdef CONFIG_SLUB_DEBUG
 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
-		s->ctor || s->dtor)) {
+		s->ctor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
 		 * permitted to overwrite the first word of the object on
@@ -1970,13 +1967,11 @@ static int calculate_sizes(struct kmem_cache *s)
 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
-	s->dtor = dtor;
 	s->objsize = size;
 	s->flags = flags;
 	s->align = align;
@@ -2161,7 +2156,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL, NULL))
+			flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2463,23 +2458,22 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if (s->ctor || s->dtor)
+	if (s->ctor)
 		return 1;
 
 	return 0;
 }
 
 static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags,
-		void (*ctor)(void *, struct kmem_cache *, unsigned long),
-		void (*dtor)(void *, struct kmem_cache *, unsigned long))
+		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
 	struct list_head *h;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
 
-	if (ctor || dtor)
+	if (ctor)
 		return NULL;
 
 	size = ALIGN(size, sizeof(void *));
@@ -2521,8 +2515,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
 	struct kmem_cache *s;
 
+	BUG_ON(dtor);
 	down_write(&slub_lock);
-	s = find_mergeable(size, align, flags, ctor, dtor);
+	s = find_mergeable(size, align, flags, ctor);
 	if (s) {
 		s->refcount++;
 		/*
@@ -2536,7 +2531,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	} else {
 		s = kmalloc(kmem_size, GFP_KERNEL);
 		if (s && kmem_cache_open(s, GFP_KERNEL, name,
-				size, align, flags, ctor, dtor)) {
+				size, align, flags, ctor)) {
 			if (sysfs_slab_add(s)) {
 				kfree(s);
 				goto err;
@@ -3177,17 +3172,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(ctor);
 
-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
-	if (s->dtor) {
-		int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
-}
-SLAB_ATTR_RO(dtor);
-
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3419,7 +3403,6 @@ static struct attribute * slab_attrs[] = {
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
 	&ctor_attr.attr,
-	&dtor_attr.attr,
 	&aliases_attr.attr,
 	&align_attr.attr,
 	&sanity_checks_attr.attr,
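One behavioral consequence of the slub.c changes above: cache merging becomes more permissive. find_mergeable() used to refuse to alias a new cache onto an existing one if either a ctor or a dtor was present; now only a ctor disqualifies a cache, so more caches can share backing slabs. A standalone paraphrase of the post-patch predicate (illustrative C, not the kernel code; never_merge_mask stands in for SLUB_NEVER_MERGE):

#include <stdbool.h>
#include <stddef.h>

struct cache_desc {
	unsigned long flags;
	void (*ctor)(void *);	/* simplified ctor signature */
};

/* After this patch only flags or a constructor block merging; a
 * destructor can no longer exist, so it no longer needs checking. */
static bool cache_mergeable(const struct cache_desc *c, bool slub_nomerge,
			    unsigned long never_merge_mask)
{
	if (slub_nomerge || (c->flags & never_merge_mask))
		return false;
	return c->ctor == NULL;
}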
