From 0b6b2425ba37a3fc211bc4df32002fd0d2a81c17 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Wed, 7 Jan 2009 18:08:10 -0800
Subject: [PATCH]

--- yaml ---
r: 127575
b: refs/heads/master
c: 2c26fdd70c3094fa3e84caf9ef434911933d5477
h: refs/heads/master
i:
  127573: 96f5a99f14ca0a8cf557e1bbf68153bbdc40c5c0
  127571: dda3ef6612a99efd3e2ebfa1bccb150d0548729a
  127567: 3fc64e610e735aff5393d559f573e4700b304874
v: v3
---
 [refs]                           |  2 +-
 trunk/include/linux/memcontrol.h | 10 ++++++++++
 trunk/mm/filemap.c               |  2 +-
 trunk/mm/memcontrol.c            | 10 +++++-----
 trunk/mm/memory.c                | 10 ++++------
 trunk/mm/shmem.c                 |  8 ++++----
 trunk/mm/swapfile.c              |  3 +--
 7 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/[refs] b/[refs]
index 7ff8db3a30bc..f54abe0aad60 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 887007561ae58628f03aa9046949747c04f63be8
+refs/heads/master: 2c26fdd70c3094fa3e84caf9ef434911933d5477
diff --git a/trunk/include/linux/memcontrol.h b/trunk/include/linux/memcontrol.h
index 2fdd1380bf0a..59ac95a64508 100644
--- a/trunk/include/linux/memcontrol.h
+++ b/trunk/include/linux/memcontrol.h
@@ -26,6 +26,16 @@ struct page;
 struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
+ * allocate memory but reclaims memory from all available zones, so the
+ * "where do I want memory from" bits of gfp_mask have no meaning. Any bits of
+ * that field would work, but having a rule is better: a charge function's
+ * gfp_mask should be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK to
+ * avoid ambiguous code.
+ * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
+ */
 
 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c
index 2f55a1e2baf7..ceba0bd03662 100644
--- a/trunk/mm/filemap.c
+++ b/trunk/mm/filemap.c
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	VM_BUG_ON(!PageLocked(page));
 
 	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp_mask & ~__GFP_HIGHMEM);
+					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		goto out;
 
diff --git a/trunk/mm/memcontrol.c b/trunk/mm/memcontrol.c
index 9bf5d7c8ede7..b9cd57b667d6 100644
--- a/trunk/mm/memcontrol.c
+++ b/trunk/mm/memcontrol.c
@@ -1248,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	unlock_page_cgroup(pc);
 
 	if (mem) {
-		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+		ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -1378,7 +1378,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			break;
 
 		progress = try_to_free_mem_cgroup_pages(memcg,
-				GFP_HIGHUSER_MOVABLE, false);
+				GFP_KERNEL, false);
 		if (!progress)			retry_count--;
 	}
 	return ret;
@@ -1418,7 +1418,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 			break;
 
 		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 		if (curusage >= oldusage)
 			retry_count--;
@@ -1464,7 +1464,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 		}
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-		ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
 		if (ret == -ENOMEM)
 			break;
 
@@ -1550,7 +1550,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 			goto out;
 		}
 		progress = try_to_free_mem_cgroup_pages(mem,
-						  GFP_HIGHUSER_MOVABLE, false);
+						  GFP_KERNEL, false);
 		if (!progress) {
 			nr_retries--;
 			/* maybe some writeback is necessary */
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index 1358012ffa73..e5bfbe6b594c 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -2000,7 +2000,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
 	/*
@@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	if (mem_cgroup_try_charge_swapin(mm, page,
-				GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2524,7 +2523,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto oom;
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2615,8 +2614,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		if (mem_cgroup_newpage_charge(page,
-					mm, GFP_HIGHUSER_MOVABLE)) {
+		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 			ret = VM_FAULT_OOM;
 			page_cache_release(page);
 			goto out;
diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c
index adf5c3eedbc9..bbb7b043c986 100644
--- a/trunk/mm/shmem.c
+++ b/trunk/mm/shmem.c
@@ -932,8 +932,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 	 * Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
 	 * charged back to the user(not to caller) when swap account is used.
 	 */
-	error = mem_cgroup_cache_charge_swapin(page,
-			current->mm, GFP_HIGHUSER_MOVABLE, true);
+	error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
+					true);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);
@@ -1275,7 +1275,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 			 * charge against this swap cache here.
 			 */
 			if (mem_cgroup_cache_charge_swapin(swappage,
-						current->mm, gfp, false)) {
+					current->mm, gfp & GFP_RECLAIM_MASK, false)) {
 				page_cache_release(swappage);
 				error = -ENOMEM;
 				goto failed;
@@ -1393,7 +1393,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 
 		/* Precharge page while we can wait, compensate after */
 		error = mem_cgroup_cache_charge(filepage, current->mm,
-						GFP_HIGHUSER_MOVABLE);
+						GFP_KERNEL);
 		if (error) {
 			page_cache_release(filepage);
 			shmem_unacct_blocks(info->flags, 1);
diff --git a/trunk/mm/swapfile.c b/trunk/mm/swapfile.c
index 0579d9069b61..da422c47e2ee 100644
--- a/trunk/mm/swapfile.c
+++ b/trunk/mm/swapfile.c
@@ -698,8 +698,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t *pte;
 	int ret = 1;
 
-	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
-					GFP_HIGHUSER_MOVABLE, &ptr))
+	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
 		ret = -ENOMEM;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
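
For reference, the sketch below (not part of the patch) shows how a caller inside mm/ would follow the rule documented in the new memcontrol.h comment: pass GFP_KERNEL, or mask the caller's gfp_mask with GFP_RECLAIM_MASK, when charging a page. The helper name example_charge_page_cache is hypothetical, and GFP_RECLAIM_MASK is assumed to come from mm/internal.h, as in the mm/filemap.c hunk above.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Assumes it lives in mm/, where "internal.h" provides GFP_RECLAIM_MASK.
 */
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>

#include "internal.h"	/* GFP_RECLAIM_MASK (mm-internal header) */

/* Hypothetical helper: charge a page-cache page to current->mm's memcg. */
static int example_charge_page_cache(struct page *page, gfp_t gfp_mask)
{
	int error;

	/*
	 * Strip placement bits such as __GFP_HIGHMEM; memcg only cares about
	 * the reclaim-related bits, so pass gfp_mask & GFP_RECLAIM_MASK
	 * (or simply GFP_KERNEL), as the memcontrol.h comment asks.
	 */
	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	/* ... insert the page into the page cache here ... */
	return 0;
}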