Commit b14abd7

Hugh Dickins authored and Linus Torvalds committed Mar 5, 2012
1 parent 956d564 commit b14abd7
Showing 7 changed files with 19 additions and 49 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9f78ff005a6b6313728247113948450b2adddde8
+refs/heads/master: 7512102cf64d36e3c7444480273623c7aab3563f
5 changes: 0 additions & 5 deletions trunk/include/linux/memcontrol.h
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                         struct page *newpage);
 
-extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                 struct page *newpage)
 {
 }
-
-static inline void mem_cgroup_reset_owner(struct page *page)
-{
-}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
11 changes: 0 additions & 11 deletions trunk/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
         new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
         if (new_page) {
-                /*
-                 * The memcg-specific accounting when moving
-                 * pages around the LRU lists relies on the
-                 * page's owner (memcg) to be valid. Usually,
-                 * pages are assigned to a new owner before
-                 * being put on the LRU list, but since this
-                 * is not the case here, the stale owner from
-                 * a previous allocation cycle must be reset.
-                 */
-                mem_cgroup_reset_owner(new_page);
                 copy_user_highpage(new_page, page, address, vma);
 
                 SetPageDirty(new_page);
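The comment deleted above records the problem this commit attacks from the other side: a page's page_cgroup persists across free/realloc cycles, so a freshly allocated page can still carry an owner pointer into a memcg from its previous life, which rmdir may since have destroyed. A minimal userspace model of that staleness (a sketch only; fake_page, fake_memcg, and alloc_frame are illustrative names, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* A "page frame descriptor" that is never reallocated, mirroring how a
 * struct page_cgroup keeps whatever pc->mem_cgroup was last set to. */
struct fake_memcg { const char *name; };
struct fake_page  { struct fake_memcg *owner; int charged; };

static struct fake_page frame;

static struct fake_page *alloc_frame(void)
{
        /* Deliberately does NOT clear frame.owner: a new allocation
         * inherits the stale owner from the previous cycle. */
        return &frame;
}

int main(void)
{
        struct fake_memcg *old = malloc(sizeof(*old));
        struct fake_page *page;

        old->name = "doomed_cgroup";

        /* First life: charged to a cgroup, then uncharged and freed. */
        frame.owner = old;
        frame.charged = 1;
        frame.charged = 0;

        free(old);              /* cgroup removed (rmdir) */

        /* Second life: the stale pointer is still there. */
        page = alloc_frame();
        printf("stale owner: %p - a use-after-free if dereferenced\n",
               (void *)page->owner);
        return 0;
}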
30 changes: 13 additions & 17 deletions trunk/mm/memcontrol.c
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
         pc = lookup_page_cgroup(page);
         memcg = pc->mem_cgroup;
+
+        /*
+         * Surreptitiously switch any uncharged page to root:
+         * an uncharged page off lru does nothing to secure
+         * its former mem_cgroup from sudden removal.
+         *
+         * Our caller holds lru_lock, and PageCgroupUsed is updated
+         * under page_cgroup lock: between them, they make all uses
+         * of pc->mem_cgroup safe.
+         */
+        if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+                pc->mem_cgroup = memcg = root_mem_cgroup;
+
         mz = page_cgroup_zoneinfo(memcg, page);
         /* compound_order() is stabilized through lru_lock */
         MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
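The hunk above is the heart of the fix: rather than requiring every allocator to reset the owner up front, any uncharged page is switched to root_mem_cgroup at the moment it is put on the LRU, while lru_lock pins the state. A minimal userspace sketch of the same pattern, assuming a permanent root object and a single mutex standing in for lru_lock (the names here are illustrative, not the kernel's):

#include <pthread.h>
#include <stdio.h>

struct model_memcg { const char *name; long lru_pages; };
struct model_pc    { struct model_memcg *mem_cgroup; int used; };

/* root is static and never freed, so it is always safe to use. */
static struct model_memcg root = { "root", 0 };
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the hunk above: with lru_lock held, switch any uncharged
 * page to root before its owner pointer is dereferenced. */
static void lru_add(struct model_pc *pc)
{
        struct model_memcg *memcg;

        pthread_mutex_lock(&lru_lock);
        memcg = pc->mem_cgroup;
        if (!pc->used && memcg != &root)
                pc->mem_cgroup = memcg = &root;
        memcg->lru_pages++;     /* safe: owner is charged, or is root */
        pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
        struct model_memcg dead = { "removed_cgroup", 0 };
        struct model_pc pc = { &dead, 0 };      /* uncharged, stale owner */

        lru_add(&pc);
        printf("owner after lru_add: %s\n", pc.mem_cgroup->name);
        return 0;
}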
@@ -3029,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
         batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-        struct page_cgroup *pc;
-
-        if (mem_cgroup_disabled())
-                return;
-
-        pc = lookup_page_cgroup(newpage);
-        VM_BUG_ON(PageCgroupUsed(pc));
-        pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
2 changes: 0 additions & 2 deletions trunk/mm/migrate.c
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         if (!newpage)
                 return -ENOMEM;
 
-        mem_cgroup_reset_owner(newpage);
-
         if (page_count(page) == 1) {
                 /* page was freed from under us. So we are done. */
                 goto out;
8 changes: 5 additions & 3 deletions trunk/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
                        struct page *page, struct page *page_tail)
 {
-        int active;
+        int uninitialized_var(active);
 enum lru_list lru;
 const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
                         active = 0;
                         lru = LRU_INACTIVE_ANON;
                 }
-                update_page_reclaim_stat(zone, page_tail, file, active);
         } else {
                 SetPageUnevictable(page_tail);
                 lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
                 list_head = page_tail->lru.prev;
                 list_move_tail(&page_tail->lru, list_head);
         }
+
+        if (!PageUnevictable(page))
+                update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
         SetPageLRU(page);
         if (active)
                 SetPageActive(page);
-        update_page_reclaim_stat(zone, page, file, active);
         add_page_to_lru_list(zone, page, lru);
+        update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
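The two reorderings above follow from the lazy fix-up: update_page_reclaim_stat() reaches the page's memcg through its page_cgroup, and it is the LRU insertion (via mem_cgroup_lru_add_list()) that redirects a stale owner to root, so the stat update must run only after the page is on the list. A compact sketch of that ordering constraint, reusing the userspace model above (update_stat() stands in for update_page_reclaim_stat(); all names remain illustrative):

#include <stdio.h>

struct model_memcg { const char *name; long recent_rotated; };
struct model_pc    { struct model_memcg *mem_cgroup; int used; };

static struct model_memcg root = { "root", 0 };

static void lru_add(struct model_pc *pc)
{
        if (!pc->used && pc->mem_cgroup != &root)
                pc->mem_cgroup = &root; /* lazy fix-up, as above */
}

static void update_stat(struct model_pc *pc)
{
        pc->mem_cgroup->recent_rotated++;       /* dereferences the owner */
}

int main(void)
{
        struct model_memcg dead = { "removed_cgroup", 0 };
        struct model_pc pc = { &dead, 0 };

        /* Old order: update_stat(&pc) here would touch "dead".
         * New order: LRU add first, so the owner is known-safe... */
        lru_add(&pc);
        update_stat(&pc);       /* ...then account. */
        printf("accounted to: %s\n", pc.mem_cgroup->name);
        return 0;
}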
10 changes: 0 additions & 10 deletions trunk/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 new_page = alloc_page_vma(gfp_mask, vma, addr);
                 if (!new_page)
                         break;          /* Out of memory */
-                /*
-                 * The memcg-specific accounting when moving
-                 * pages around the LRU lists relies on the
-                 * page's owner (memcg) to be valid. Usually,
-                 * pages are assigned to a new owner before
-                 * being put on the LRU list, but since this
-                 * is not the case here, the stale owner from
-                 * a previous allocation cycle must be reset.
-                 */
-                mem_cgroup_reset_owner(new_page);
         }
 
         /*
