Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 248618
b: refs/heads/master
c: 5f80b13
h: refs/heads/master
v: v3
  • Loading branch information
Christoph Lameter authored and Pekka Enberg committed Apr 17, 2011
1 parent 5f445be commit 138a13f
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 33de04ec4cb80b6bd0782e88a64954e60bc15dc1
refs/heads/master: 5f80b13ae45df7da6646d1881da186318e70b6b6
34 changes: 22 additions & 12 deletions trunk/mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -271,10 +271,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
__p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
Expand Down Expand Up @@ -330,6 +326,21 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
return x.x & OO_MASK;
}

/*
 * Fill in a bitmap with one bit set per object that is currently on the
 * page's freelist (i.e. the free objects). Callers typically invert the
 * test (!test_bit) to find the objects in use.
 *
 * The slab lock or the node list lock must be held so that the page
 * cannot vanish from under us while we walk its freelist.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *base = page_address(page);
	void *obj = page->freelist;

	while (obj) {
		set_bit(slab_index(obj, s, base), map);
		obj = get_freepointer(s, obj);
	}
}

#ifdef CONFIG_SLUB_DEBUG
/*
* Debug settings:
Expand Down Expand Up @@ -2673,9 +2684,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
return;
slab_err(s, page, "%s", text);
slab_lock(page);
for_each_free_object(p, s, page->freelist)
set_bit(slab_index(p, s, addr), map);

get_map(s, page, map);
for_each_object(p, s, addr, page->objects) {

if (!test_bit(slab_index(p, s, addr), map)) {
Expand Down Expand Up @@ -3610,10 +3620,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
/* Now we know that a valid freelist exists */
bitmap_zero(map, page->objects);

for_each_free_object(p, s, page->freelist) {
set_bit(slab_index(p, s, addr), map);
if (!check_object(s, page, p, SLUB_RED_INACTIVE))
return 0;
get_map(s, page, map);
for_each_object(p, s, addr, page->objects) {
if (test_bit(slab_index(p, s, addr), map))
if (!check_object(s, page, p, SLUB_RED_INACTIVE))
return 0;
}

for_each_object(p, s, addr, page->objects)
Expand Down Expand Up @@ -3821,8 +3832,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
void *p;

bitmap_zero(map, page->objects);
for_each_free_object(p, s, page->freelist)
set_bit(slab_index(p, s, addr), map);
get_map(s, page, map);

for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map))
Expand Down

0 comments on commit 138a13f

Please sign in to comment.