Skip to content

Commit

Permalink
slub: explicitly document position of inserting slab to partial list
Browse files Browse the repository at this point in the history
Adding a slab to the head or the tail of the partial list is performance
sensitive. So explicitly use DEACTIVATE_TO_TAIL/DEACTIVATE_TO_HEAD to
document the intended position and avoid getting it wrong.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
  • Loading branch information
Shaohua Li authored and Pekka Enberg committed Aug 27, 2011
1 parent 130655e commit 136333d
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -1534,7 +1534,7 @@ static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
n->nr_partial++;
if (tail)
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
else
list_add(&page->lru, &n->partial);
Expand Down Expand Up @@ -1781,13 +1781,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
enum slab_modes l = M_NONE, m = M_NONE;
void *freelist;
void *nextfree;
int tail = 0;
int tail = DEACTIVATE_TO_HEAD;
struct page new;
struct page old;

if (page->freelist) {
stat(s, DEACTIVATE_REMOTE_FREES);
tail = 1;
tail = DEACTIVATE_TO_TAIL;
}

c->tid = next_tid(c->tid);
Expand Down Expand Up @@ -1893,7 +1893,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
if (m == M_PARTIAL) {

add_partial(n, page, tail);
stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
stat(s, tail);

} else if (m == M_FULL) {

Expand Down Expand Up @@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
if (unlikely(!prior)) {
remove_full(s, page);
add_partial(n, page, 1);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
}
Expand Down Expand Up @@ -2695,7 +2695,7 @@ static void early_kmem_cache_node_alloc(int node)
init_kmem_cache_node(n, kmem_cache_node);
inc_slabs_node(kmem_cache_node, node, page->objects);

add_partial(n, page, 0);
add_partial(n, page, DEACTIVATE_TO_HEAD);
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
Expand Down

0 comments on commit 136333d

Please sign in to comment.