Commit e19ad94
---
r: 292683
b: refs/heads/master
c: b240285
h: refs/heads/master
i:
  292681: 4d4bbdd
  292679: 179dbba
v: v3
KAMEZAWA Hiroyuki authored and Linus Torvalds committed Mar 22, 2012
1 parent 4756011 commit e19ad94
Showing 3 changed files with 34 additions and 33 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ca464d69b19120a826aa2534de2511a6f542edf5
+refs/heads/master: b24028572fb69e9dd6de8c359eba2b2c66baa889
8 changes: 1 addition & 7 deletions trunk/include/linux/page_cgroup.h
@@ -4,7 +4,6 @@
 enum {
 	/* flags for mem_cgroup */
 	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
-	PCG_CACHE, /* charged as cache */
 	PCG_USED, /* this object is in use. */
 	PCG_MIGRATION, /* under page migration */
 	/* flags for mem_cgroup and file and I/O status */
@@ -64,11 +63,6 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
 static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
 	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
 
-/* Cache flag is set only once (at allocation) */
-TESTPCGFLAG(Cache, CACHE)
-CLEARPCGFLAG(Cache, CACHE)
-SETPCGFLAG(Cache, CACHE)
-
 TESTPCGFLAG(Used, USED)
 CLEARPCGFLAG(Used, USED)
 SETPCGFLAG(Used, USED)
@@ -85,7 +79,7 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
 	/*
 	 * Don't take this lock in IRQ context.
-	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
+	 * This lock is for pc->mem_cgroup, USED, MIGRATION
 	 */
 	bit_spin_lock(PCG_LOCK, &pc->flags);
 }
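As context for the header change: the SETPCGFLAG/CLEARPCGFLAG/TESTPCGFLAG macros generate one-line static inline accessors over the generic bitops, so deleting the three Cache invocations is what removes SetPageCgroupCache(), ClearPageCgroupCache() and PageCgroupCache() from the call sites in memcontrol.c below. A minimal sketch of what the surviving Used invocations expand to, assuming the same test_bit/set_bit/clear_bit pattern as the macro bodies visible in the hunk above:

static inline int PageCgroupUsed(struct page_cgroup *pc)
	{ return test_bit(PCG_USED, &pc->flags); }	/* TESTPCGFLAG(Used, USED) */

static inline void SetPageCgroupUsed(struct page_cgroup *pc)
	{ set_bit(PCG_USED, &pc->flags); }		/* SETPCGFLAG(Used, USED) */

static inline void ClearPageCgroupUsed(struct page_cgroup *pc)
	{ clear_bit(PCG_USED, &pc->flags); }		/* CLEARPCGFLAG(Used, USED) */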
57 changes: 32 additions & 25 deletions trunk/mm/memcontrol.c
@@ -690,15 +690,19 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
-					 bool file, int nr_pages)
+					 bool anon, int nr_pages)
 {
 	preempt_disable();
 
-	if (file)
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
+	/*
+	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
+	 * counted as CACHE even if it's on ANON LRU.
+	 */
+	if (anon)
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 				nr_pages);
 	else
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 				nr_pages);
 
 	/* pagein of a big page is an event. So, ignore page size */
@@ -2442,6 +2446,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
+	bool anon;
 
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
@@ -2477,19 +2482,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * See mem_cgroup_add_lru_list(), etc.
 	 */
 	smp_wmb();
-	switch (ctype) {
-	case MEM_CGROUP_CHARGE_TYPE_CACHE:
-	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
-		SetPageCgroupCache(pc);
-		SetPageCgroupUsed(pc);
-		break;
-	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
-		ClearPageCgroupCache(pc);
-		SetPageCgroupUsed(pc);
-		break;
-	default:
-		break;
-	}
+	SetPageCgroupUsed(pc);
 
 	if (lrucare) {
 		if (was_on_lru) {
@@ -2500,7 +2493,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+		anon = true;
+	else
+		anon = false;
+
+	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
 	unlock_page_cgroup(pc);
 
 	/*
@@ -2565,6 +2563,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
 	unsigned long flags;
 	int ret;
+	bool anon = PageAnon(page);
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON(PageLRU(page));
@@ -2593,14 +2592,14 @@ static int mem_cgroup_move_account(struct page *page,
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
+	mem_cgroup_charge_statistics(from, anon, -nr_pages);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
 		__mem_cgroup_cancel_charge(from, nr_pages);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
+	mem_cgroup_charge_statistics(to, anon, nr_pages);
 	/*
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
@@ -2921,6 +2920,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
 	struct page_cgroup *pc;
+	bool anon;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2946,8 +2946,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	if (!PageCgroupUsed(pc))
 		goto unlock_out;
 
+	anon = PageAnon(page);
+
 	switch (ctype) {
 	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+		anon = true;
+		/* fallthrough */
 	case MEM_CGROUP_CHARGE_TYPE_DROP:
 		/* See mem_cgroup_prepare_migration() */
 		if (page_mapped(page) || PageCgroupMigration(pc))
@@ -2964,7 +2968,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 		break;
 	}
 
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
+	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
 
 	ClearPageCgroupUsed(pc);
 	/*
@@ -3271,6 +3275,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 {
 	struct page *used, *unused;
 	struct page_cgroup *pc;
+	bool anon;
 
 	if (!memcg)
 		return;
@@ -3292,8 +3297,10 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	lock_page_cgroup(pc);
 	ClearPageCgroupMigration(pc);
 	unlock_page_cgroup(pc);
-
-	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
+	anon = PageAnon(used);
+	__mem_cgroup_uncharge_common(unused,
+		anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
+		     : MEM_CGROUP_CHARGE_TYPE_CACHE);
 
 	/*
 	 * If a page is a file cache, radix-tree replacement is very atomic
@@ -3303,7 +3310,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	 * and USED bit check in mem_cgroup_uncharge_page() will do enough
 	 * check. (see prepare_charge() also)
 	 */
-	if (PageAnon(used))
+	if (anon)
 		mem_cgroup_uncharge_page(used);
 	/*
 	 * At migration, we may charge account against cgroup which has no
@@ -3333,7 +3340,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	/* fix accounting on old pages */
 	lock_page_cgroup(pc);
 	memcg = pc->mem_cgroup;
-	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+	mem_cgroup_charge_statistics(memcg, false, -1);
 	ClearPageCgroupUsed(pc);
 	unlock_page_cgroup(pc);

Expand Down
