Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 16826
b: refs/heads/master
c: 4d268eb
h: refs/heads/master
v: v3
  • Loading branch information
Pekka Enberg authored and Linus Torvalds committed Jan 9, 2006
1 parent 636f114 commit 98127f4
Show file tree
Hide file tree
Showing 2 changed files with 50 additions and 41 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 85289f98ddc13f6cea82c59d6ff78f9d205dfccc
refs/heads/master: 4d268eba1187ef66844a6a33b9431e5d0dadd4ad
89 changes: 49 additions & 40 deletions trunk/mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -1473,6 +1473,53 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
}
}

/**
 * calculate_slab_order - calculate size (page order) of slabs and the number
 * of objects per slab.
 * @cachep: the cache being set up; gfporder and num are written here
 * @size: size of a single object, including alignment padding
 * @align: object alignment
 * @flags: cache creation flags (CFLGS_OFF_SLAB affects the object limit)
 *
 * Returns the number of left-over (wasted) bytes in a slab of the chosen
 * order. On failure (no order can hold even one object), cachep->num is
 * set to 0 and 0 is returned; the caller checks cachep->num.
 *
 * This could be made much more intelligent. For now, try to avoid using
 * high order pages for slabs. When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
			size_t align, gfp_t flags)
{
	size_t left_over = 0;

	for ( ; ; cachep->gfporder++) {
		unsigned int num;
		size_t remainder;

		if (cachep->gfporder > MAX_GFP_ORDER) {
			/* Object too large for the maximum order: give up. */
			cachep->num = 0;
			break;
		}

		cache_estimate(cachep->gfporder, size, align, flags,
				&remainder, &num);
		if (!num)
			continue;
		/*
		 * More than offslab_limit objects will cause problems.
		 * Test the freshly computed 'num': cachep->num still holds
		 * the previous iteration's value (and is stale on the first
		 * pass), so it must not be used here.
		 */
		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
			break;

		cachep->num = num;
		left_over = remainder;

		/*
		 * Large number of objects is good, but very large slabs are
		 * currently bad for the gfp()s.
		 */
		if (cachep->gfporder >= slab_break_gfp_order)
			break;

		/*
		 * Acceptable internal fragmentation: less than 1/8 of the
		 * slab is wasted.
		 */
		if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
			break;
	}
	return left_over;
}

/**
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
Expand Down Expand Up @@ -1682,46 +1729,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->gfporder = 0;
cache_estimate(cachep->gfporder, size, align, flags,
&left_over, &cachep->num);
} else {
/*
* Calculate size (in pages) of slabs, and the num of objs per
* slab. This could be made much more intelligent. For now,
* try to avoid using high page-orders for slabs. When the
* gfp() funcs are more friendly towards high-order requests,
* this should be changed.
*/
do {
unsigned int break_flag = 0;
cal_wastage:
cache_estimate(cachep->gfporder, size, align, flags,
&left_over, &cachep->num);
if (break_flag)
break;
if (cachep->gfporder >= MAX_GFP_ORDER)
break;
if (!cachep->num)
goto next;
if (flags & CFLGS_OFF_SLAB &&
cachep->num > offslab_limit) {
/* This num of objs will cause problems. */
cachep->gfporder--;
break_flag++;
goto cal_wastage;
}

/*
* Large num of objs is good, but v. large slabs are
* currently bad for the gfp()s.
*/
if (cachep->gfporder >= slab_break_gfp_order)
break;

if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
break; /* Acceptable internal fragmentation. */
next:
cachep->gfporder++;
} while (1);
}
} else
left_over = calculate_slab_order(cachep, size, align, flags);

if (!cachep->num) {
printk("kmem_cache_create: couldn't create cache %s.\n", name);
Expand Down

0 comments on commit 98127f4

Please sign in to comment.