Skip to content

Commit

Permalink
mm: memcg: remove unused node/section info from pc->flags
Browse files Browse the repository at this point in the history
To find the page corresponding to a certain page_cgroup, pc->flags
encoded the ID of the node or section whose base page_cgroup array the
pc pointer could be compared against.

Now that the per-memory cgroup LRU lists link page descriptors directly,
there is no longer any code that knows the struct page_cgroup of a PFN
but not the struct page.

[hughd@google.com: remove unused node/section info from pc->flags fix]
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Johannes Weiner authored and Linus Torvalds committed Jan 13, 2012
1 parent 925b767 commit 6b208e3
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 85 deletions.
33 changes: 0 additions & 33 deletions include/linux/page_cgroup.h
Original file line number Diff line number Diff line change
Expand Up @@ -121,39 +121,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
local_irq_restore(*flags);
}

#ifdef CONFIG_SPARSEMEM
#define PCG_ARRAYID_WIDTH SECTIONS_SHIFT
#else
#define PCG_ARRAYID_WIDTH NODES_SHIFT
#endif

#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
#error Not enough space left in pc->flags to store page_cgroup array IDs
#endif

/* pc->flags: ARRAY-ID | FLAGS */

#define PCG_ARRAYID_MASK ((1UL << PCG_ARRAYID_WIDTH) - 1)

#define PCG_ARRAYID_OFFSET (BITS_PER_LONG - PCG_ARRAYID_WIDTH)
/*
* Zero the shift count for non-existent fields, to prevent compiler
* warnings and ensure references are optimized away.
*/
#define PCG_ARRAYID_SHIFT (PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))

static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
unsigned long id)
{
pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
}

static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
{
return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
}

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

Expand Down
59 changes: 7 additions & 52 deletions mm/page_cgroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,6 @@
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
{
pc->flags = 0;
set_page_cgroup_array_id(pc, id);
pc->mem_cgroup = NULL;
}
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)
Expand All @@ -41,28 +35,13 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
return base + offset;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
unsigned long pfn;
struct page *page;
pg_data_t *pgdat;

pgdat = NODE_DATA(page_cgroup_array_id(pc));
pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
page = pfn_to_page(pfn);
VM_BUG_ON(pc != lookup_page_cgroup(page));
return page;
}

static int __init alloc_node_page_cgroup(int nid)
{
struct page_cgroup *base, *pc;
struct page_cgroup *base;
unsigned long table_size;
unsigned long start_pfn, nr_pages, index;
unsigned long nr_pages;

start_pfn = NODE_DATA(nid)->node_start_pfn;
nr_pages = NODE_DATA(nid)->node_spanned_pages;

if (!nr_pages)
return 0;

Expand All @@ -72,10 +51,6 @@ static int __init alloc_node_page_cgroup(int nid)
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
if (!base)
return -ENOMEM;
for (index = 0; index < nr_pages; index++) {
pc = base + index;
init_page_cgroup(pc, nid);
}
NODE_DATA(nid)->node_page_cgroup = base;
total_usage += table_size;
return 0;
Expand Down Expand Up @@ -116,23 +91,10 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
return section->page_cgroup + pfn;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
struct mem_section *section;
struct page *page;
unsigned long nr;

nr = page_cgroup_array_id(pc);
section = __nr_to_section(nr);
page = pfn_to_page(pc - section->page_cgroup);
VM_BUG_ON(pc != lookup_page_cgroup(page));
return page;
}

static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
void *addr = NULL;
gfp_t flags = GFP_KERNEL | __GFP_NOWARN;

addr = alloc_pages_exact_nid(nid, size, flags);
if (addr) {
Expand All @@ -141,9 +103,9 @@ static void *__meminit alloc_page_cgroup(size_t size, int nid)
}

if (node_state(nid, N_HIGH_MEMORY))
addr = vmalloc_node(size, nid);
addr = vzalloc_node(size, nid);
else
addr = vmalloc(size);
addr = vzalloc(size);

return addr;
}
Expand All @@ -166,14 +128,11 @@ static void free_page_cgroup(void *addr)

static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
struct page_cgroup *base, *pc;
struct mem_section *section;
struct page_cgroup *base;
unsigned long table_size;
unsigned long nr;
int index;

nr = pfn_to_section_nr(pfn);
section = __nr_to_section(nr);
section = __pfn_to_section(pfn);

if (section->page_cgroup)
return 0;
Expand All @@ -193,10 +152,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
return -ENOMEM;
}

for (index = 0; index < PAGES_PER_SECTION; index++) {
pc = base + index;
init_page_cgroup(pc, nr);
}
/*
* The passed "pfn" may not be aligned to SECTION. For the calculation
* we need to apply a mask.
Expand Down

0 comments on commit 6b208e3

Please sign in to comment.