Commit 51828b6

r: 284360
b: refs/heads/master
c: ab936cb
h: refs/heads/master
v: v3

KAMEZAWA Hiroyuki authored and Linus Torvalds committed Jan 13, 2012
1 parent ab85276 commit 51828b6
Showing 4 changed files with 53 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 28d82dc1c4edbc352129f97f4ca22624d1fe61de
+refs/heads/master: ab936cbcd02072a34b60d268f94440fd5cf1970b
6 changes: 6 additions & 0 deletions trunk/include/linux/memcontrol.h
@@ -122,6 +122,8 @@ struct zone_reclaim_stat*
 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
+extern void mem_cgroup_replace_page_cache(struct page *oldpage,
+					struct page *newpage);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
@@ -369,6 +371,10 @@ static inline
 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
+static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
+				struct page *newpage)
+{
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
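
The header change follows the usual memcg pattern: a real extern declaration when the controller is configured in, and an empty static-inline stub otherwise, so callers such as mm/filemap.c never need an #ifdef of their own. A minimal, self-contained sketch of that pattern (FEATURE_ENABLED and feature_hook are invented illustrative names, not kernel symbols):

#include <stdio.h>

/* Toggle this to model the feature being configured in or out. */
#define FEATURE_ENABLED 1

#if FEATURE_ENABLED
/* Real declaration; the definition lives in one translation unit. */
extern void feature_hook(int old_id, int new_id);
#else
/* Empty inline stub: callers still compile and the call folds away. */
static inline void feature_hook(int old_id, int new_id) { }
#endif

#if FEATURE_ENABLED
void feature_hook(int old_id, int new_id)
{
	printf("moving accounting from page %d to page %d\n", old_id, new_id);
}
#endif

int main(void)
{
	feature_hook(1, 2);	/* no #ifdef needed at the call site */
	return 0;
}

With the stub in place the call compiles to nothing when the feature is off, which is exactly what the new mem_cgroup_replace_page_cache() stub buys the filemap.c caller below.
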
18 changes: 2 additions & 16 deletions trunk/mm/filemap.c
@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
 	int error;
-	struct mem_cgroup *memcg = NULL;
 
 	VM_BUG_ON(!PageLocked(old));
 	VM_BUG_ON(!PageLocked(new));
 	VM_BUG_ON(new->mapping);
 
-	/*
-	 * This is not page migration, but prepare_migration and
-	 * end_migration does enough work for charge replacement.
-	 *
-	 * In the longer term we probably want a specialized function
-	 * for moving the charge from old to new in a more efficient
-	 * manner.
-	 */
-	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
-	if (error)
-		return error;
-
 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (!error) {
 		struct address_space *mapping = old->mapping;
@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
+		/* mem_cgroup codes must not be called under tree_lock */
+		mem_cgroup_replace_page_cache(old, new);
 		radix_tree_preload_end();
 		if (freepage)
 			freepage(old);
 		page_cache_release(old);
-		mem_cgroup_end_migration(memcg, old, new, true);
-	} else {
-		mem_cgroup_end_migration(memcg, old, new, false);
 	}
 
 	return error;
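
Taken together, the two filemap.c hunks drop the prepare_migration/end_migration pair and instead move the charge with one call made after tree_lock is released. A condensed sketch of the resulting control flow, based only on the hunks above (radix-tree bookkeeping and the freepage callback are elided, so this is not the verbatim kernel function):

int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(old));	/* both pages arrive locked      */
	VM_BUG_ON(!PageLocked(new));
	VM_BUG_ON(new->mapping);	/* new is not in any mapping yet */

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;

		spin_lock_irq(&mapping->tree_lock);
		/* ... swap old for new in the radix tree, fix zone stats ... */
		spin_unlock_irq(&mapping->tree_lock);

		/* mem_cgroup codes must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);

		radix_tree_preload_end();
		page_cache_release(old);
	}
	return error;
}

The charge move itself cannot fail, so the error handling that previously dealt with mem_cgroup_prepare_migration() disappears along with the else branch for mem_cgroup_end_migration().
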
44 changes: 44 additions & 0 deletions trunk/mm/memcontrol.c
@@ -3432,6 +3432,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	cgroup_release_and_wakeup_rmdir(&memcg->css);
 }
 
+/*
+ * At replace page cache, newpage is not under any memcg but it's on
+ * LRU. So, this function doesn't touch res_counter but handles LRU
+ * in correct way. Both pages are locked so we cannot race with uncharge.
+ */
+void mem_cgroup_replace_page_cache(struct page *oldpage,
+				  struct page *newpage)
+{
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+	struct zone *zone;
+	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	unsigned long flags;
+
+	if (mem_cgroup_disabled())
+		return;
+
+	pc = lookup_page_cgroup(oldpage);
+	/* fix accounting on old pages */
+	lock_page_cgroup(pc);
+	memcg = pc->mem_cgroup;
+	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+	ClearPageCgroupUsed(pc);
+	unlock_page_cgroup(pc);
+
+	if (PageSwapBacked(oldpage))
+		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+	zone = page_zone(newpage);
+	pc = lookup_page_cgroup(newpage);
+	/*
+	 * Even if newpage->mapping was NULL before starting replacement,
+	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
+	 * LRU while we overwrite pc->mem_cgroup.
+	 */
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	if (PageLRU(newpage))
+		del_page_from_lru_list(zone, newpage, page_lru(newpage));
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
+	if (PageLRU(newpage))
+		add_page_to_lru_list(zone, newpage, page_lru(newpage));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_VM
 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
 {
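
What the new function does, stripped of locking and LRU details, is re-point the per-page accounting record: the old page's page_cgroup loses its "used" mark, the new page's gains it with the same memcg, and the res_counter total is never touched. A toy, self-contained model of that handoff (toy_memcg, toy_page_cg and toy_replace_page_cache are invented names, not the kernel's page_cgroup API):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for struct mem_cgroup and struct page_cgroup. */
struct toy_memcg   { long cached_pages; };		/* per-group statistics */
struct toy_page_cg { struct toy_memcg *owner; bool used; };

/* Model of the handoff: statistics move, the group's charge total does not. */
static void toy_replace_page_cache(struct toy_page_cg *oldpc,
				   struct toy_page_cg *newpc)
{
	struct toy_memcg *memcg = oldpc->owner;

	/* "fix accounting on old pages" */
	memcg->cached_pages -= 1;
	oldpc->used = false;

	/* commit the same group to the new page */
	newpc->owner = memcg;
	newpc->used = true;
	memcg->cached_pages += 1;
}

int main(void)
{
	struct toy_memcg grp = { .cached_pages = 1 };
	struct toy_page_cg oldpc = { .owner = &grp, .used = true };
	struct toy_page_cg newpc = { 0 };

	toy_replace_page_cache(&oldpc, &newpc);

	assert(!oldpc.used && newpc.used && newpc.owner == &grp);
	assert(grp.cached_pages == 1);	/* net charge unchanged */
	printf("charge moved without touching the group total\n");
	return 0;
}

The real function additionally takes zone->lru_lock around the commit because, as its in-code comment notes, the new page may already be on the LRU while pc->mem_cgroup is being rewritten.
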
