
Commit c6d9c49

---
r: 19384
b: refs/heads/master
c: fbaccac
h: refs/heads/master
v: v3
Steven Rostedt authored and Linus Torvalds committed Feb 1, 2006
1 parent fed2f13 commit c6d9c49
Showing 2 changed files with 60 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5ec8a847bb8ae2ba6395cfb7cb4bfdc78ada82ed
+refs/heads/master: fbaccacff1f17c65ae0972085368a7ec75be6062
81 changes: 59 additions & 22 deletions trunk/mm/slab.c
@@ -702,32 +702,69 @@ kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 }
 EXPORT_SYMBOL(kmem_find_general_cachep);
 
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
-			   int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-	int i;
-	size_t wastage = PAGE_SIZE << gfporder;
-	size_t extra = 0;
-	size_t base = 0;
+	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
 
-	if (!(flags & CFLGS_OFF_SLAB)) {
-		base = sizeof(struct slab);
-		extra = sizeof(kmem_bufctl_t);
-	}
-	i = 0;
-	while (i * size + ALIGN(base + i * extra, align) <= wastage)
-		i++;
-	if (i > 0)
-		i--;
+/* Calculate the number of objects and left-over bytes for a given
+   buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+			   size_t align, int flags, size_t *left_over,
+			   unsigned int *num)
+{
+	int nr_objs;
+	size_t mgmt_size;
+	size_t slab_size = PAGE_SIZE << gfporder;
 
-	if (i > SLAB_LIMIT)
-		i = SLAB_LIMIT;
+	/*
+	 * The slab management structure can be either off the slab or
+	 * on it. For the latter case, the memory allocated for a
+	 * slab is used for:
+	 *
+	 * - The struct slab
+	 * - One kmem_bufctl_t for each object
+	 * - Padding to respect alignment of @align
+	 * - @buffer_size bytes for each object
+	 *
+	 * If the slab management structure is off the slab, then the
+	 * alignment will already be calculated into the size. Because
+	 * the slabs are all pages aligned, the objects will be at the
+	 * correct alignment when allocated.
+	 */
+	if (flags & CFLGS_OFF_SLAB) {
+		mgmt_size = 0;
+		nr_objs = slab_size / buffer_size;
 
-	*num = i;
-	wastage -= i * size;
-	wastage -= ALIGN(base + i * extra, align);
-	*left_over = wastage;
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+	} else {
+		/*
+		 * Ignore padding for the initial guess. The padding
+		 * is at most @align-1 bytes, and @buffer_size is at
+		 * least @align. In the worst case, this result will
+		 * be one greater than the number of objects that fit
+		 * into the memory allocation when taking the padding
+		 * into account.
+		 */
+		nr_objs = (slab_size - sizeof(struct slab)) /
+			  (buffer_size + sizeof(kmem_bufctl_t));
+
+		/*
+		 * This calculated number will be either the right
+		 * amount, or one greater than what we want.
+		 */
+		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+		       > slab_size)
+			nr_objs--;
+
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+
+		mgmt_size = slab_mgmt_size(nr_objs, align);
+	}
+	*num = nr_objs;
+	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
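For readers who want to sanity-check the rewrite, below is a minimal user-space sketch (not kernel code) that compares the new closed-form estimate against the old iterative loop for the on-slab case. PAGE_SIZE, ALIGN(), kmem_bufctl_t, and the struct slab layout are simplified stand-ins for the kernel definitions, and the SLAB_LIMIT clamp is omitted, so the absolute counts are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel definitions (illustrative only). */
#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

typedef unsigned int kmem_bufctl_t;
struct slab {			/* reduced version of the real struct slab */
	void *s_mem;
	unsigned long colouroff;
	kmem_bufctl_t free;
};

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab) + nr_objs * sizeof(kmem_bufctl_t), align);
}

/* New method: closed-form guess, corrected down by at most one. */
static int estimate_new(size_t slab_size, size_t buffer_size, size_t align)
{
	int nr_objs = (slab_size - sizeof(struct slab)) /
		      (buffer_size + sizeof(kmem_bufctl_t));

	if (slab_mgmt_size(nr_objs, align) + nr_objs * buffer_size > slab_size)
		nr_objs--;
	return nr_objs;
}

/* Old method: count up until the slab overflows, then back off one. */
static int estimate_old(size_t slab_size, size_t buffer_size, size_t align)
{
	int i = 0;

	while (i * buffer_size +
	       ALIGN(sizeof(struct slab) + i * sizeof(kmem_bufctl_t), align)
	       <= slab_size)
		i++;
	return i > 0 ? i - 1 : 0;
}

int main(void)
{
	size_t sizes[] = { 32, 64, 96, 128, 192, 256, 512, 1024 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("buffer_size %4zu: old=%3d new=%3d\n", sizes[i],
		       estimate_old(PAGE_SIZE, sizes[i], 8),
		       estimate_new(PAGE_SIZE, sizes[i], 8));
	return 0;
}

Both versions print the same counts for every size, which is exactly the property the patch depends on: because @buffer_size is at least @align, ignoring padding in the divisor can overshoot by at most one object, so the single conditional decrement makes the closed-form result exact while replacing the old O(n) counting loop with O(1) arithmetic.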
