Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 127575
b: refs/heads/master
c: 2c26fdd
h: refs/heads/master
i:
  127573: 96f5a99
  127571: dda3ef6
  127567: 3fc64e6
v: v3
  • Loading branch information
KAMEZAWA Hiroyuki authored and Linus Torvalds committed Jan 8, 2009
1 parent ca159ae commit 0b6b242
Show file tree
Hide file tree
Showing 7 changed files with 26 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 887007561ae58628f03aa9046949747c04f63be8
refs/heads/master: 2c26fdd70c3094fa3e84caf9ef434911933d5477
10 changes: 10 additions & 0 deletions trunk/include/linux/memcontrol.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,16 @@ struct page;
struct mm_struct;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones. So, the
 * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field are available, but adding a rule is better: a charge function's
 * gfp_mask should be set to GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK) to
 * avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
Expand Down
2 changes: 1 addition & 1 deletion trunk/mm/filemap.c
Original file line number Diff line number Diff line change
Expand Up @@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
VM_BUG_ON(!PageLocked(page));

error = mem_cgroup_cache_charge(page, current->mm,
gfp_mask & ~__GFP_HIGHMEM);
gfp_mask & GFP_RECLAIM_MASK);
if (error)
goto out;

Expand Down
10 changes: 5 additions & 5 deletions trunk/mm/memcontrol.c
Original file line number Diff line number Diff line change
Expand Up @@ -1248,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
unlock_page_cgroup(pc);

if (mem) {
ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
css_put(&mem->css);
}
*ptr = mem;
Expand Down Expand Up @@ -1378,7 +1378,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
break;

progress = try_to_free_mem_cgroup_pages(memcg,
GFP_HIGHUSER_MOVABLE, false);
GFP_KERNEL, false);
if (!progress) retry_count--;
}
return ret;
Expand Down Expand Up @@ -1418,7 +1418,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
break;

oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
if (curusage >= oldusage)
retry_count--;
Expand Down Expand Up @@ -1464,7 +1464,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
}
spin_unlock_irqrestore(&zone->lru_lock, flags);

ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
if (ret == -ENOMEM)
break;

Expand Down Expand Up @@ -1550,7 +1550,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
goto out;
}
progress = try_to_free_mem_cgroup_pages(mem,
GFP_HIGHUSER_MOVABLE, false);
GFP_KERNEL, false);
if (!progress) {
nr_retries--;
/* maybe some writeback is necessary */
Expand Down
10 changes: 4 additions & 6 deletions trunk/mm/memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -2000,7 +2000,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
cow_user_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);

if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new;

/*
Expand Down Expand Up @@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);

if (mem_cgroup_try_charge_swapin(mm, page,
GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
ret = VM_FAULT_OOM;
unlock_page(page);
goto out;
Expand Down Expand Up @@ -2524,7 +2523,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto oom;
__SetPageUptodate(page);

if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
goto oom_free_page;

entry = mk_pte(page, vma->vm_page_prot);
Expand Down Expand Up @@ -2615,8 +2614,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM;
goto out;
}
if (mem_cgroup_newpage_charge(page,
mm, GFP_HIGHUSER_MOVABLE)) {
if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
ret = VM_FAULT_OOM;
page_cache_release(page);
goto out;
Expand Down
8 changes: 4 additions & 4 deletions trunk/mm/shmem.c
Original file line number Diff line number Diff line change
Expand Up @@ -932,8 +932,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
* Charge page using GFP_KERNEL while we can wait. The charge is
* charged back to the user (not to the caller) when swap accounting is used.
*/
error = mem_cgroup_cache_charge_swapin(page,
current->mm, GFP_HIGHUSER_MOVABLE, true);
error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
true);
if (error)
goto out;
error = radix_tree_preload(GFP_KERNEL);
Expand Down Expand Up @@ -1275,7 +1275,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
* charge against this swap cache here.
*/
if (mem_cgroup_cache_charge_swapin(swappage,
current->mm, gfp, false)) {
current->mm, gfp & GFP_RECLAIM_MASK, false)) {
page_cache_release(swappage);
error = -ENOMEM;
goto failed;
Expand Down Expand Up @@ -1393,7 +1393,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,

/* Precharge page while we can wait, compensate after */
error = mem_cgroup_cache_charge(filepage, current->mm,
GFP_HIGHUSER_MOVABLE);
GFP_KERNEL);
if (error) {
page_cache_release(filepage);
shmem_unacct_blocks(info->flags, 1);
Expand Down
3 changes: 1 addition & 2 deletions trunk/mm/swapfile.c
Original file line number Diff line number Diff line change
Expand Up @@ -698,8 +698,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
pte_t *pte;
int ret = 1;

if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
GFP_HIGHUSER_MOVABLE, &ptr))
if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
ret = -ENOMEM;

pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
Expand Down

0 comments on commit 0b6b242

Please sign in to comment.