Commit

---
r: 319553
b: refs/heads/master
c: ec3ab08
h: refs/heads/master
i:
  319551: e58ddfe
v: v3
Christoph Lameter authored and Pekka Enberg committed Jun 1, 2012
1 parent 347322f commit a8f624e
Showing 3 changed files with 17 additions and 21 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 188fd063208942a4681d8e8a4484ad0d4ae0fda1
+refs/heads/master: ec3ab083a7a004282ee374bdaeb0aa603521b8eb
1 change: 0 additions & 1 deletion trunk/include/linux/slub_def.h
@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
unsigned long tid; /* Globally unique transaction id */
struct page *page; /* The slab from which we are allocating */
struct page *partial; /* Partially allocated frozen slabs */
-int node; /* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
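
For context, the visible part of struct kmem_cache_cpu after this hunk would read roughly as below. This is a sketch assembled from the lines above, not part of the commit; fields before the hunk and the closing brace are outside the shown range and are only indicated.

struct kmem_cache_cpu {
	/* ... fields before this hunk omitted ... */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

The per-cpu node is no longer cached in a separate field; the later hunks read it from the page itself via page_to_nid(c->page).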
35 changes: 16 additions & 19 deletions trunk/mm/slub.c
@@ -1561,7 +1561,6 @@ static void *get_partial_node(struct kmem_cache *s,

if (!object) {
c->page = page;
-c->node = page_to_nid(page);
stat(s, ALLOC_FROM_PARTIAL);
object = t;
available = page->objects - page->inuse;
@@ -2057,7 +2056,7 @@ static void flush_all(struct kmem_cache *s)
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
-if (node != NUMA_NO_NODE && c->node != node)
+if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
return 0;
#endif
return 1;
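
Assembled from the hunk above, node_match() after this change would read roughly as follows. This is a sketch for readability, not part of the commit; only the line marked + differs from the previous version, and the closing brace lies just outside the hunk.

static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
		return 0;
#endif
	return 1;
}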
@@ -2152,7 +2151,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
page->freelist = NULL;

stat(s, ALLOC_SLAB);
-c->node = page_to_nid(page);
c->page = page;
*pc = c;
} else
@@ -2269,7 +2267,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (c->partial) {
c->page = c->partial;
c->partial = c->page->next;
-c->node = page_to_nid(c->page);
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
@@ -2294,7 +2291,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

c->freelist = get_freepointer(s, freelist);
deactivate_slab(s, c);
-c->node = NUMA_NO_NODE;
local_irq_restore(flags);
return freelist;
}
@@ -4507,30 +4503,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,

for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-int node = ACCESS_ONCE(c->node);
+int node;
struct page *page;

-if (node < 0)
-continue;
page = ACCESS_ONCE(c->page);
-if (page) {
-if (flags & SO_TOTAL)
-x = page->objects;
-else if (flags & SO_OBJECTS)
-x = page->inuse;
-else
-x = 1;
+if (!page)
+continue;

-total += x;
-nodes[node] += x;
-}
-page = c->partial;
+node = page_to_nid(page);
+if (flags & SO_TOTAL)
+x = page->objects;
+else if (flags & SO_OBJECTS)
+x = page->inuse;
+else
+x = 1;
+
+total += x;
+nodes[node] += x;

+page = ACCESS_ONCE(c->partial);
if (page) {
x = page->pobjects;
total += x;
nodes[node] += x;
}
+
per_cpu[node]++;
}
}
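
Put back together, the per-cpu loop in show_slab_objects() after this hunk would read roughly as below. This is a sketch assembled from the added and context lines above, not part of the commit; the declarations of x, total, nodes[], per_cpu[] and cpu belong to the enclosing function and lie outside the hunk.

	for_each_possible_cpu(cpu) {
		struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
		int node;
		struct page *page;

		page = ACCESS_ONCE(c->page);
		if (!page)
			continue;

		node = page_to_nid(page);
		if (flags & SO_TOTAL)
			x = page->objects;
		else if (flags & SO_OBJECTS)
			x = page->inuse;
		else
			x = 1;

		total += x;
		nodes[node] += x;

		page = ACCESS_ONCE(c->partial);
		if (page) {
			x = page->pobjects;
			total += x;
			nodes[node] += x;
		}

		per_cpu[node]++;
	}

The node is now derived from the cpu slab's page at the point of use rather than read from a separately maintained c->node field, which appears to be the point of the change.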
