Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history

```yaml
r: 105909
b: refs/heads/master
c: cede86a
h: refs/heads/master
i:
  105907: 076a587
v: v3
```
  • Loading branch information
Li Zefan authored and Linus Torvalds committed Jul 25, 2008
1 parent 8646cc8 commit 51078ab
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: accf163e6ab729f1fc5fffaa0310e498270bf4e7
refs/heads/master: cede86acd8bd5d2205dec28db8ac86410a3a19e8
23 changes: 12 additions & 11 deletions trunk/mm/memcontrol.c
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,9 @@ void mem_cgroup_move_lists(struct page *page, bool active)
struct mem_cgroup_per_zone *mz;
unsigned long flags;

if (mem_cgroup_subsys.disabled)
return;

/*
* We cannot lock_page_cgroup while holding zone's lru_lock,
* because other holders of lock_page_cgroup can be interrupted
Expand Down Expand Up @@ -533,9 +536,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup_per_zone *mz;

if (mem_cgroup_subsys.disabled)
return 0;

pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
if (unlikely(pc == NULL))
goto err;
Expand Down Expand Up @@ -620,6 +620,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
if (mem_cgroup_subsys.disabled)
return 0;

/*
* If already mapped, we don't have to account.
* If page cache, page->mapping has address_space.
Expand All @@ -638,6 +641,9 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
if (mem_cgroup_subsys.disabled)
return 0;

/*
* Corner case handling. This is called from add_to_page_cache()
* in usual. But some FS (shmem) precharges this page before calling it
Expand Down Expand Up @@ -788,6 +794,9 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
int progress = 0;
int retry = MEM_CGROUP_RECLAIM_RETRIES;

if (mem_cgroup_subsys.disabled)
return 0;

rcu_read_lock();
mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
css_get(&mem->css);
Expand Down Expand Up @@ -857,9 +866,6 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
int ret = -EBUSY;
int node, zid;

if (mem_cgroup_subsys.disabled)
return 0;

css_get(&mem->css);
/*
* page reclaim code (kswapd etc..) will move pages between
Expand Down Expand Up @@ -1103,8 +1109,6 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
static int mem_cgroup_populate(struct cgroup_subsys *ss,
struct cgroup *cont)
{
if (mem_cgroup_subsys.disabled)
return 0;
return cgroup_add_files(cont, ss, mem_cgroup_files,
ARRAY_SIZE(mem_cgroup_files));
}
Expand All @@ -1117,9 +1121,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct mm_struct *mm;
struct mem_cgroup *mem, *old_mem;

if (mem_cgroup_subsys.disabled)
return;

mm = get_task_mm(p);
if (mm == NULL)
return;
Expand Down

0 comments on commit 51078ab

Please sign in to comment.