
Commit e335c8a

---
r: 292688
b: refs/heads/master
c: 89c06bd
h: refs/heads/master
v: v3
KAMEZAWA Hiroyuki authored and Linus Torvalds committed Mar 22, 2012
1 parent d51b301 commit e335c8a
Showing 4 changed files with 102 additions and 25 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 312734c04e2fecc58429aec98194e4ff12d8f7d6
+refs/heads/master: 89c06bd52fb9ffceddf84f7309d2e8c9f1666216
35 changes: 35 additions & 0 deletions trunk/include/linux/memcontrol.h
@@ -141,6 +141,31 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
+void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
+					 unsigned long *flags);
+
+static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+	if (mem_cgroup_disabled())
+		return;
+	rcu_read_lock();
+	*locked = false;
+	return __mem_cgroup_begin_update_page_stat(page, locked, flags);
+}
+
+void __mem_cgroup_end_update_page_stat(struct page *page,
+				unsigned long *flags);
+static inline void mem_cgroup_end_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+	if (mem_cgroup_disabled())
+		return;
+	if (*locked)
+		__mem_cgroup_end_update_page_stat(page, flags);
+	rcu_read_unlock();
+}
+
 void mem_cgroup_update_page_stat(struct page *page,
 				 enum mem_cgroup_page_stat_item idx,
 				 int val);
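The two wrappers above are meant to be used as a bracketing pair around any page-stat update: begin takes rcu_read_lock() and clears *locked before the slow path decides whether the move lock is needed; end releases the move lock only if begin took it, then unlocks RCU. A minimal sketch of the intended call pattern, modeled on what page_add_file_rmap() does in the mm/rmap.c hunk of this commit (the caller name here is illustrative):

	/* Illustrative caller, mirroring page_add_file_rmap() below. */
	static void account_file_mapped(struct page *page)
	{
		bool locked;
		unsigned long flags;

		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
		/* ... change the page state that is being accounted ... */
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}

Note that locked and flags are owned by the caller and must be passed unchanged to both calls: begin writes *locked, and end consults it to decide whether a move lock has to be dropped.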
@@ -341,6 +366,16 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
+static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+}
+
+static inline void mem_cgroup_end_update_page_stat(struct page *page,
+					bool *locked, unsigned long *flags)
+{
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_page_stat_item idx)
 {
62 changes: 42 additions & 20 deletions trunk/mm/memcontrol.c
@@ -1910,32 +1910,59 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  * If there is, we take a lock.
  */
 
+void __mem_cgroup_begin_update_page_stat(struct page *page,
+				bool *locked, unsigned long *flags)
+{
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+
+	pc = lookup_page_cgroup(page);
+again:
+	memcg = pc->mem_cgroup;
+	if (unlikely(!memcg || !PageCgroupUsed(pc)))
+		return;
+	/*
+	 * If this memory cgroup is not under account moving, we don't
+	 * need to take move_lock_page_cgroup(). Because we already hold
+	 * rcu_read_lock(), any calls to move_account will be delayed until
+	 * rcu_read_unlock() if mem_cgroup_stealed() == true.
+	 */
+	if (!mem_cgroup_stealed(memcg))
+		return;
+
+	move_lock_mem_cgroup(memcg, flags);
+	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
+		move_unlock_mem_cgroup(memcg, flags);
+		goto again;
+	}
+	*locked = true;
+}
+
+void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
+{
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	/*
+	 * It's guaranteed that pc->mem_cgroup never changes while
+	 * lock is held because a routine modifies pc->mem_cgroup
+	 * should take move_lock_page_cgroup().
+	 */
+	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
+}
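The begin helper implements an optimistic check-lock-recheck loop: read pc->mem_cgroup with no lock, take the move lock only if mem_cgroup_stealed() reports a move in flight, then re-validate the owner under the lock and retry if it changed. A self-contained userspace analogue of that protocol, using plain pthreads and C11 atomics (not kernel code; the RCU side of the scheme is not modeled here):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_mutex_t move_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_int moving;	/* plays the role of mem_cgroup_stealed() */
	static atomic_int owner;	/* plays the role of pc->mem_cgroup */
	static long counter[4];		/* plays the role of memcg->stat->count[] */

	static void update_stat(int idx, int val)
	{
		int seen;
		int locked = 0;
	again:
		seen = atomic_load(&owner);
		if (atomic_load(&moving)) {
			/* a mover is active: lock, then re-validate the owner */
			pthread_mutex_lock(&move_lock);
			if (seen != atomic_load(&owner)) {
				pthread_mutex_unlock(&move_lock);
				goto again;	/* owner moved under us: retry */
			}
			locked = 1;
		}
		counter[idx] += val;	/* owner is stable: safe to account */
		if (locked)
			pthread_mutex_unlock(&move_lock);
	}

	int main(void)
	{
		update_stat(0, 1);
		printf("counter[0] = %ld\n", counter[0]);
		return 0;
	}

A mover would set moving, take move_lock, change owner, drop the lock, and clear moving; the recheck under the lock is what closes the window between the unlocked read of owner and the decision to account against it.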

 void mem_cgroup_update_page_stat(struct page *page,
 				 enum mem_cgroup_page_stat_item idx, int val)
 {
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
-	bool need_unlock = false;
 	unsigned long uninitialized_var(flags);
 
 	if (mem_cgroup_disabled())
 		return;
-again:
-	rcu_read_lock();
+
 	memcg = pc->mem_cgroup;
 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
-		goto out;
-	/* pc->mem_cgroup is unstable ? */
-	if (unlikely(mem_cgroup_stealed(memcg))) {
-		/* take a lock against to access pc->mem_cgroup */
-		move_lock_mem_cgroup(memcg, &flags);
-		if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
-			move_unlock_mem_cgroup(memcg, &flags);
-			rcu_read_unlock();
-			goto again;
-		}
-		need_unlock = true;
-	}
+		return;
 
 	switch (idx) {
 	case MEMCG_NR_FILE_MAPPED:
@@ -1950,11 +1977,6 @@ void mem_cgroup_update_page_stat(struct page *page,
 	}
 
 	this_cpu_add(memcg->stat->count[idx], val);
-
-out:
-	if (unlikely(need_unlock))
-		move_unlock_mem_cgroup(memcg, &flags);
-	rcu_read_unlock();
 }
 
 /*
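With the retry loop hoisted into __mem_cgroup_begin_update_page_stat(), the net effect of the two hunks above is that mem_cgroup_update_page_stat() keeps no locking of its own. After this patch it reduces to roughly the following (reconstructed from the hunks; the switch body is untouched by this commit and elided here, as it is in the diff):

	void mem_cgroup_update_page_stat(struct page *page,
					 enum mem_cgroup_page_stat_item idx, int val)
	{
		struct mem_cgroup *memcg;
		struct page_cgroup *pc = lookup_page_cgroup(page);
		unsigned long uninitialized_var(flags);

		if (mem_cgroup_disabled())
			return;

		memcg = pc->mem_cgroup;
		if (unlikely(!memcg || !PageCgroupUsed(pc)))
			return;

		switch (idx) {
		case MEMCG_NR_FILE_MAPPED:
			/* ... unchanged idx translation, elided between the hunks ... */
		}

		this_cpu_add(memcg->stat->count[idx], val);
	}

Correctness now depends entirely on the caller having bracketed this call with the begin/end pair, which is exactly what the rmap.c changes below do.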
28 changes: 24 additions & 4 deletions trunk/mm/rmap.c
@@ -1148,10 +1148,15 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
+	bool locked;
+	unsigned long flags;
+
+	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
+	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /**
@@ -1162,9 +1167,21 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+	bool anon = PageAnon(page);
+	bool locked;
+	unsigned long flags;
+
+	/*
+	 * The anon case has no mem_cgroup page_stat to update; but may
+	 * uncharge_page() below, where the lock ordering can deadlock if
+	 * we hold the lock against page_stat move: so avoid it on anon.
+	 */
+	if (!anon)
+		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		return;
+		goto out;
 
 	/*
 	 * Now that the last pte has gone, s390 must transfer dirty
@@ -1173,16 +1190,16 @@ void page_remove_rmap(struct page *page)
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
 	 */
-	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	if ((!anon || PageSwapCache(page)) &&
 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
 	/*
 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
 	 * and not charged by memcg for now.
 	 */
 	if (unlikely(PageHuge(page)))
-		return;
-	if (PageAnon(page)) {
+		goto out;
+	if (anon) {
 		mem_cgroup_uncharge_page(page);
 		if (!PageTransHuge(page))
 			__dec_zone_page_state(page, NR_ANON_PAGES);
@@ -1202,6 +1219,9 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
+out:
+	if (!anon)
+		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
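Taken together, the page_remove_rmap() hunks restructure the function around a single out: exit so the begin/end pair always balances, while skipping the pair entirely for anon pages, whose mem_cgroup_uncharge_page() path could otherwise deadlock against the move lock, as the comment in the second hunk explains. A condensed control-flow outline of the function after this patch (reconstructed from the hunks above; the unchanged middle is elided):

	void page_remove_rmap(struct page *page)
	{
		bool anon = PageAnon(page);
		bool locked;
		unsigned long flags;

		if (!anon)	/* anon has no page_stat to protect, and must not lock */
			mem_cgroup_begin_update_page_stat(page, &locked, &flags);

		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;	/* page still mapped by someone else */

		/* ... s390 dirty transfer, hugepage check (also goto out), ... */
		/* ... anon/file counter updates, as in the hunks above ...    */

	out:
		if (!anon)
			mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}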
