
Commit

---
r: 86821
b: refs/heads/master
c: 8869b8f
h: refs/heads/master
i:
  86819: bf7b097
v: v3
Hugh Dickins authored and Linus Torvalds committed Mar 5, 2008
1 parent 97d2ed4 commit a9b7a68
Showing 2 changed files with 29 additions and 67 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 8289546e573d5ff681cdf0fc7a1184cca66fdb55
refs/heads/master: 8869b8f6e09a1b49bf915eb03f663f2e4e8fbcd4
94 changes: 28 additions & 66 deletions trunk/mm/memcontrol.c
@@ -137,6 +137,7 @@ struct mem_cgroup {
*/
struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
* We use the lower bit of the page->page_cgroup pointer as a bit spin
@@ -162,7 +163,7 @@ struct page_cgroup {
struct mem_cgroup *mem_cgroup;
atomic_t ref_cnt; /* Helpful when pages move b/w */
/* mapped and cached states */
int flags;
int flags;
};
#define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */
@@ -177,20 +178,11 @@ static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
return page_zonenum(pc->page);
}

enum {
MEM_CGROUP_TYPE_UNSPEC = 0,
MEM_CGROUP_TYPE_MAPPED,
MEM_CGROUP_TYPE_CACHED,
MEM_CGROUP_TYPE_ALL,
MEM_CGROUP_TYPE_MAX,
};

enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
MEM_CGROUP_CHARGE_TYPE_MAPPED,
};


/*
* Always modified under lru lock. Then, not necessary to preempt_disable()
*/
@@ -199,11 +191,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
{
int val = (charge)? 1 : -1;
struct mem_cgroup_stat *stat = &mem->stat;
VM_BUG_ON(!irqs_disabled());

VM_BUG_ON(!irqs_disabled());
if (flags & PAGE_CGROUP_FLAG_CACHE)
__mem_cgroup_stat_add_safe(stat,
MEM_CGROUP_STAT_CACHE, val);
__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
else
__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}
@@ -240,8 +231,6 @@ static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
return total;
}

static struct mem_cgroup init_mem_cgroup;

static inline
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
@@ -273,8 +262,7 @@ void mm_free_cgroup(struct mm_struct *mm)

static inline int page_cgroup_locked(struct page *page)
{
return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
&page->page_cgroup);
return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
@@ -285,8 +273,7 @@ static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
return (struct page_cgroup *)
(page->page_cgroup & ~PAGE_CGROUP_LOCK);
return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void __always_inline lock_page_cgroup(struct page *page)
@@ -308,7 +295,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
* A caller can detect failure of clearing by following
* clear_page_cgroup(page, pc) == pc
*/

static struct page_cgroup *clear_page_cgroup(struct page *page,
struct page_cgroup *pc)
{
@@ -417,6 +403,7 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
return (int)((rss * 100L) / total);
}

/*
* This function is called from vmscan.c. In page reclaiming loop. balance
* between active and inactive list is calculated. For memory controller
@@ -480,7 +467,6 @@ long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);

return (nr_inactive >> priority);
}

@@ -601,16 +587,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
rcu_read_lock();
mem = rcu_dereference(mm->mem_cgroup);
/*
* For every charge from the cgroup, increment reference
* count
* For every charge from the cgroup, increment reference count
*/
css_get(&mem->css);
rcu_read_unlock();

/*
* If we created the page_cgroup, we should free it on exceeding
* the cgroup limit.
*/
while (res_counter_charge(&mem->res, PAGE_SIZE)) {
if (!(gfp_mask & __GFP_WAIT))
goto out;
@@ -619,12 +600,12 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
continue;

/*
* try_to_free_mem_cgroup_pages() might not give us a full
* picture of reclaim. Some pages are reclaimed and might be
* moved to swap cache or just unmapped from the cgroup.
* Check the limit again to see if the reclaim reduced the
* current usage of the cgroup before giving up
*/
* try_to_free_mem_cgroup_pages() might not give us a full
* picture of reclaim. Some pages are reclaimed and might be
* moved to swap cache or just unmapped from the cgroup.
* Check the limit again to see if the reclaim reduced the
* current usage of the cgroup before giving up
*/
if (res_counter_check_under_limit(&mem->res))
continue;

@@ -660,7 +641,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,

mz = page_cgroup_zoneinfo(pc);
spin_lock_irqsave(&mz->lru_lock, flags);
/* Update statistics vector */
__mem_cgroup_add_list(pc);
spin_unlock_irqrestore(&mz->lru_lock, flags);

@@ -673,26 +653,19 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_MAPPED);
MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

/*
* See if the cached pages should be charged at all?
*/
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
int ret = 0;
if (!mm)
mm = &init_mm;

ret = mem_cgroup_charge_common(page, mm, gfp_mask,
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE);
return ret;
}

/*
@@ -742,11 +715,11 @@ void mem_cgroup_uncharge_page(struct page *page)
* Returns non-zero if a page (under migration) has valid page_cgroup member.
* Refcnt of page_cgroup is incremented.
*/

int mem_cgroup_prepare_migration(struct page *page)
{
struct page_cgroup *pc;
int ret = 0;

lock_page_cgroup(page);
pc = page_get_page_cgroup(page);
if (pc && atomic_inc_not_zero(&pc->ref_cnt))
@@ -759,28 +732,30 @@ void mem_cgroup_end_migration(struct page *page)
{
mem_cgroup_uncharge_page(page);
}

/*
* We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
* We know both *page* and *newpage* are now not-on-LRU and PG_locked.
* And no race with uncharge() routines because page_cgroup for *page*
* has extra one reference by mem_cgroup_prepare_migration.
*/

void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
struct page_cgroup *pc;
struct mem_cgroup *mem;
unsigned long flags;
struct mem_cgroup_per_zone *mz;

retry:
pc = page_get_page_cgroup(page);
if (!pc)
return;

mem = pc->mem_cgroup;
mz = page_cgroup_zoneinfo(pc);
if (clear_page_cgroup(page, pc) != pc)
goto retry;
spin_lock_irqsave(&mz->lru_lock, flags);

spin_lock_irqsave(&mz->lru_lock, flags);
__mem_cgroup_remove_list(pc);
spin_unlock_irqrestore(&mz->lru_lock, flags);

@@ -793,7 +768,6 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
spin_lock_irqsave(&mz->lru_lock, flags);
__mem_cgroup_add_list(pc);
spin_unlock_irqrestore(&mz->lru_lock, flags);
return;
}

/*
@@ -802,8 +776,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
* *And* this routine doesn't reclaim page itself, just removes page_cgroup.
*/
#define FORCE_UNCHARGE_BATCH (128)
static void
mem_cgroup_force_empty_list(struct mem_cgroup *mem,
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
struct mem_cgroup_per_zone *mz,
int active)
{
@@ -837,27 +810,27 @@ mem_cgroup_force_empty_list,
} else /* being uncharged ? ...do relax */
break;
}

spin_unlock_irqrestore(&mz->lru_lock, flags);
if (!list_empty(list)) {
cond_resched();
goto retry;
}
return;
}

/*
* make mem_cgroup's charge to be 0 if there is no task.
* This enables deleting this mem_cgroup.
*/

int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
int ret = -EBUSY;
int node, zid;

css_get(&mem->css);
/*
* page reclaim code (kswapd etc..) will move pages between
* active_list <-> inactive_list while we don't take a lock.
* active_list <-> inactive_list while we don't take a lock.
* So, we have to do loop here until all lists are empty.
*/
while (mem->res.usage > 0) {
@@ -879,8 +852,6 @@ int mem_cgroup_force_empty(struct mem_cgroup *mem)
return ret;
}



int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
*tmp = memparse(buf, &buf);
@@ -918,8 +889,7 @@ static ssize_t mem_force_empty_write(struct cgroup *cont,
size_t nbytes, loff_t *ppos)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
int ret;
ret = mem_cgroup_force_empty(mem);
int ret = mem_cgroup_force_empty(mem);
if (!ret)
ret = nbytes;
return ret;
@@ -928,7 +898,6 @@ static ssize_t mem_force_empty_write(struct cgroup *cont,
/*
* Note: This should be removed if cgroup supports write-only file.
*/

static ssize_t mem_force_empty_read(struct cgroup *cont,
struct cftype *cft,
struct file *file, char __user *userbuf,
@@ -937,7 +906,6 @@ static ssize_t mem_force_empty_read(struct cgroup *cont,
return -EINVAL;
}


static const struct mem_cgroup_stat_desc {
const char *msg;
u64 unit;
@@ -990,8 +958,6 @@ static int mem_control_stat_open(struct inode *unused, struct file *file)
return single_open(file, mem_control_stat_show, cont);
}



static struct cftype mem_cgroup_files[] = {
{
.name = "usage_in_bytes",
@@ -1057,9 +1023,6 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
kfree(mem->info.nodeinfo[node]);
}


static struct mem_cgroup init_mem_cgroup;

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
@@ -1149,7 +1112,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,

out:
mmput(mm);
return;
}

struct cgroup_subsys mem_cgroup_subsys = {
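The comment reflowed inside mem_cgroup_charge_common() above describes the one piece of real logic this cleanup touches: a charge that would exceed the cgroup's limit retries reclaim, and re-checks the limit even when reclaim reports no progress, because pages can be unmapped or moved to swap cache without being counted as freed. Below is a minimal userspace sketch of that retry pattern; struct res_counter here and the counter_charge(), counter_under_limit(), try_reclaim_some_pages() and charge_with_retry() helpers are simplified stand-ins for the kernel's res_counter_charge(), res_counter_check_under_limit() and try_to_free_mem_cgroup_pages(), not the kernel code itself.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the kernel's struct res_counter. */
struct res_counter {
	size_t usage;
	size_t limit;
};

/* Nonzero on failure, like res_counter_charge(): refuse to go over limit. */
static int counter_charge(struct res_counter *res, size_t sz)
{
	if (res->usage + sz > res->limit)
		return 1;
	res->usage += sz;
	return 0;
}

/* True if one more charge of sz would fit (cf. res_counter_check_under_limit). */
static bool counter_under_limit(const struct res_counter *res, size_t sz)
{
	return res->usage + sz <= res->limit;
}

/* Stand-in for try_to_free_mem_cgroup_pages(): true when progress was made. */
static bool try_reclaim_some_pages(void)
{
	return false;
}

/*
 * The retry pattern from mem_cgroup_charge_common(): keep retrying the
 * charge while reclaim makes progress, and re-check the limit even when
 * reclaim reports none, since usage can drop without reclaim noticing.
 * The retry bound here is illustrative.
 */
static int charge_with_retry(struct res_counter *res, size_t page_size,
			     bool can_sleep)
{
	int retries = 5;

	while (counter_charge(res, page_size)) {
		if (!can_sleep)
			return -1;	/* no __GFP_WAIT: fail fast */
		if (try_reclaim_some_pages())
			continue;	/* progress made: retry the charge */
		if (counter_under_limit(res, page_size))
			continue;	/* room appeared anyway: retry */
		if (--retries <= 0)
			return -1;	/* -ENOMEM in the kernel */
	}
	return 0;
}

int main(void)
{
	struct res_counter res = { .usage = 0, .limit = 8 * 4096 };

	return charge_with_retry(&res, 4096, true) ? 1 : 0;
}

In the kernel version at the time of this commit, exhausting the retries additionally triggers mem_cgroup_out_of_memory() before the charge fails.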
