Commit c427eb3

---
r: 166650
b: refs/heads/master
c: 4e64915
h: refs/heads/master
v: v3

KAMEZAWA Hiroyuki authored and Linus Torvalds committed Oct 1, 2009
1 parent b0f4311 commit c427eb3
Showing 4 changed files with 55 additions and 84 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3dece8347df6a16239fab10dadb370854f1c969c
+refs/heads/master: 4e649152cbaa1aedd01821d200ab9d597fe469e4
6 changes: 2 additions & 4 deletions trunk/include/linux/res_counter.h
@@ -114,8 +114,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 int __must_check res_counter_charge_locked(struct res_counter *counter,
                unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
-               unsigned long val, struct res_counter **limit_fail_at,
-               struct res_counter **soft_limit_at);
+               unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -128,8 +127,7 @@ int __must_check res_counter_charge(struct res_counter *counter,
  */
 
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val,
-               bool *was_soft_limit_excess);
+void res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
 static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
 {
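
The net effect of the header change, as a call-site sketch: hard-limit failures are still reported through the limit_fail_at out-parameter, while soft-limit state is no longer reported by either entry point. The caller below and its handle_over_limit() helper are hypothetical; only the two res_counter prototypes come from the header above.

        struct res_counter *fail_res;
        int ret;

        /* hard-limit failures are still reported via fail_res ... */
        ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
        if (ret)
                handle_over_limit(fail_res);    /* hypothetical helper */

        /* ... while uncharge now takes no soft-limit out-parameter at all */
        res_counter_uncharge(&mem->res, PAGE_SIZE);
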
18 changes: 2 additions & 16 deletions trunk/kernel/res_counter.c
@@ -37,27 +37,17 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
 }
 
 int res_counter_charge(struct res_counter *counter, unsigned long val,
-                       struct res_counter **limit_fail_at,
-                       struct res_counter **soft_limit_fail_at)
+                       struct res_counter **limit_fail_at)
 {
        int ret;
        unsigned long flags;
        struct res_counter *c, *u;
 
        *limit_fail_at = NULL;
-       if (soft_limit_fail_at)
-               *soft_limit_fail_at = NULL;
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
                ret = res_counter_charge_locked(c, val);
-               /*
-                * With soft limits, we return the highest ancestor
-                * that exceeds its soft limit
-                */
-               if (soft_limit_fail_at &&
-                   !res_counter_soft_limit_check_locked(c))
-                       *soft_limit_fail_at = c;
                spin_unlock(&c->lock);
                if (ret < 0) {
                        *limit_fail_at = c;
@@ -85,18 +75,14 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
        counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val,
-                       bool *was_soft_limit_excess)
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
 {
        unsigned long flags;
        struct res_counter *c;
 
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
-               if (was_soft_limit_excess)
-                       *was_soft_limit_excess =
-                               !res_counter_soft_limit_check_locked(c);
                res_counter_uncharge_locked(c, val);
                spin_unlock(&c->lock);
        }
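
For readers outside the kernel tree, the charge path above can be modeled as a standalone C program. This is a minimal sketch under simplifying assumptions (no spinlocks or IRQ handling, invented struct layout and limits); it shows the two behaviors the function keeps after this commit: walking the parent chain, and rolling back already-charged ancestors when one level hits its hard limit.

        #include <stddef.h>
        #include <stdio.h>

        struct res_counter {
                unsigned long usage;            /* current consumption */
                unsigned long limit;            /* hard limit */
                struct res_counter *parent;     /* NULL at the root */
        };

        /* mirrors the new signature: one out-parameter, no soft-limit report */
        static int res_counter_charge(struct res_counter *counter, unsigned long val,
                                      struct res_counter **limit_fail_at)
        {
                struct res_counter *c, *u;

                *limit_fail_at = NULL;
                for (c = counter; c != NULL; c = c->parent) {
                        if (c->usage + val > c->limit) {
                                *limit_fail_at = c;
                                /* undo the levels already charged, as the
                                 * real function does in its unwind path */
                                for (u = counter; u != c; u = u->parent)
                                        u->usage -= val;
                                return -1;
                        }
                        c->usage += val;
                }
                return 0;
        }

        int main(void)
        {
                struct res_counter root  = { 0, 4096, NULL };
                struct res_counter child = { 0, 8192, &root };
                struct res_counter *fail;

                res_counter_charge(&child, 4096, &fail);       /* ok: fills root */
                if (res_counter_charge(&child, 1, &fail))      /* root over limit */
                        printf("failed at %s\n", fail == &root ? "root" : "child");
                printf("child=%lu root=%lu\n", child.usage, root.usage); /* 4096 4096 */
                return 0;
        }
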
113 changes: 50 additions & 63 deletions trunk/mm/memcontrol.c
@@ -352,16 +352,6 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
        mz->on_tree = false;
 }
 
-static void
-mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
-                               struct mem_cgroup_per_zone *mz,
-                               struct mem_cgroup_tree_per_zone *mctz)
-{
-       spin_lock(&mctz->lock);
-       __mem_cgroup_insert_exceeded(mem, mz, mctz);
-       spin_unlock(&mctz->lock);
-}
-
 static void
 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
                        struct mem_cgroup_per_zone *mz,
@@ -392,34 +382,40 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)

 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
 {
-       unsigned long long prev_usage_in_excess, new_usage_in_excess;
-       bool updated_tree = false;
+       unsigned long long new_usage_in_excess;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;
-
-       mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page));
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
        mctz = soft_limit_tree_from_page(page);
 
        /*
-        * We do updates in lazy mode, mem's are removed
-        * lazily from the per-zone, per-node rb tree
+        * Necessary to update all ancestors when hierarchy is used.
+        * because their event counter is not touched.
         */
-       prev_usage_in_excess = mz->usage_in_excess;
-
-       new_usage_in_excess = res_counter_soft_limit_excess(&mem->res);
-       if (prev_usage_in_excess) {
-               mem_cgroup_remove_exceeded(mem, mz, mctz);
-               updated_tree = true;
-       }
-       if (!new_usage_in_excess)
-               goto done;
-       mem_cgroup_insert_exceeded(mem, mz, mctz);
-
-done:
-       if (updated_tree) {
-               spin_lock(&mctz->lock);
-               mz->usage_in_excess = new_usage_in_excess;
-               spin_unlock(&mctz->lock);
+       for (; mem; mem = parent_mem_cgroup(mem)) {
+               mz = mem_cgroup_zoneinfo(mem, nid, zid);
+               new_usage_in_excess =
+                       res_counter_soft_limit_excess(&mem->res);
+               /*
+                * We have to update the tree if mz is on RB-tree or
+                * mem is over its softlimit.
+                */
+               if (new_usage_in_excess || mz->on_tree) {
+                       spin_lock(&mctz->lock);
+                       /* if on-tree, remove it */
+                       if (mz->on_tree)
+                               __mem_cgroup_remove_exceeded(mem, mz, mctz);
+                       /*
+                        * if over soft limit, insert again. mz->usage_in_excess
+                        * will be updated properly.
+                        */
+                       if (new_usage_in_excess)
+                               __mem_cgroup_insert_exceeded(mem, mz, mctz);
+                       else
+                               mz->usage_in_excess = 0;
+                       spin_unlock(&mctz->lock);
+               }
        }
 }
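
The restructured function above is the core of the commit: instead of touching only the charged group (or one ancestor reported by res_counter), it now walks every ancestor and repairs its position in the per-zone soft-limit RB-tree. A standalone sketch of that walk, with an invented memcg struct and a boolean standing in for actual RB-tree membership:

        #include <stdbool.h>
        #include <stdio.h>

        struct memcg {
                const char *name;
                unsigned long usage;
                unsigned long soft_limit;
                bool on_tree;           /* stands in for mz->on_tree */
                struct memcg *parent;   /* NULL at the root */
        };

        static unsigned long soft_limit_excess(const struct memcg *m)
        {
                return m->usage > m->soft_limit ? m->usage - m->soft_limit : 0;
        }

        static void update_tree(struct memcg *mem)
        {
                unsigned long excess;

                /* every ancestor's usage changed too, so each level must be
                 * rechecked: removed and/or (re)inserted as needed */
                for (; mem; mem = mem->parent) {
                        excess = soft_limit_excess(mem);
                        if (!excess && !mem->on_tree)
                                continue;       /* neither on tree nor over limit */
                        mem->on_tree = excess != 0;
                        printf("%-5s excess=%-5lu on_tree=%d\n",
                               mem->name, excess, mem->on_tree);
                }
        }

        int main(void)
        {
                struct memcg root  = { "root",  9000, 8192, false, NULL  };
                struct memcg child = { "child", 5000, 4096, false, &root };

                update_tree(&child);    /* both levels go on the tree */
                child.usage = 1000;
                root.usage  = 5000;
                update_tree(&child);    /* both drop back off */
                return 0;
        }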

@@ -1271,9 +1267,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        gfp_t gfp_mask, struct mem_cgroup **memcg,
                        bool oom, struct page *page)
 {
-       struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit;
+       struct mem_cgroup *mem, *mem_over_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-       struct res_counter *fail_res, *soft_fail_res = NULL;
+       struct res_counter *fail_res;
 
        if (unlikely(test_thread_flag(TIF_MEMDIE))) {
                /* Don't account this! */
@@ -1305,17 +1301,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,

                if (mem_cgroup_is_root(mem))
                        goto done;
-               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
-                                               &soft_fail_res);
+               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
                                break;
                        ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
-                                                       &fail_res, NULL);
+                                                       &fail_res);
                        if (likely(!ret))
                                break;
                        /* mem+swap counter fails */
-                       res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->res, PAGE_SIZE);
                        flags |= MEM_CGROUP_RECLAIM_NOSWAP;
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        memsw);
@@ -1354,16 +1349,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                }
        }
        /*
-        * Insert just the ancestor, we should trickle down to the correct
-        * cgroup for reclaim, since the other nodes will be below their
-        * soft limit
+        * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
+        * if they exceeds softlimit.
         */
-       if (soft_fail_res) {
-               mem_over_soft_limit =
-                       mem_cgroup_from_res_counter(soft_fail_res, res);
-               if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
-                       mem_cgroup_update_tree(mem_over_soft_limit, page);
-       }
+       if (mem_cgroup_soft_limit_check(mem))
+               mem_cgroup_update_tree(mem, page);
 done:
        return 0;
 nomem:
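
One subtlety in the new code path: mem_cgroup_soft_limit_check() is what keeps this cheap, returning true only once an event counter crosses a threshold, and the comment in mem_cgroup_update_tree() ("their event counter is not touched") explains why that function must then walk all ancestors itself. A toy model of the throttling pattern, with invented names and threshold:

        #include <stdbool.h>
        #include <stdio.h>

        #define EVENTS_THRESH 1000      /* invented; stands in for the kernel's
                                           soft-limit event threshold */

        static unsigned long events;

        /* returns true roughly once per EVENTS_THRESH charge/uncharge events */
        static bool soft_limit_check(void)
        {
                if (++events < EVENTS_THRESH)
                        return false;
                events = 0;
                return true;
        }

        int main(void)
        {
                unsigned long updates = 0;

                for (int i = 0; i < 10000; i++)
                        if (soft_limit_check())
                                updates++;      /* would call mem_cgroup_update_tree() */
                printf("%lu tree updates for 10000 events\n", updates); /* 10 */
                return 0;
        }
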
@@ -1438,10 +1428,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
                if (!mem_cgroup_is_root(mem)) {
-                       res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->res, PAGE_SIZE);
                        if (do_swap_account)
-                               res_counter_uncharge(&mem->memsw, PAGE_SIZE,
-                                                       NULL);
+                               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
                }
                css_put(&mem->css);
                return;
@@ -1520,7 +1509,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
                goto out;
 
        if (!mem_cgroup_is_root(from))
-               res_counter_uncharge(&from->res, PAGE_SIZE, NULL);
+               res_counter_uncharge(&from->res, PAGE_SIZE);
        mem_cgroup_charge_statistics(from, pc, false);
 
        page = pc->page;
@@ -1540,7 +1529,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        }
 
        if (do_swap_account && !mem_cgroup_is_root(from))
-               res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL);
+               res_counter_uncharge(&from->memsw, PAGE_SIZE);
        css_put(&from->css);
 
        css_get(&to->css);
@@ -1611,9 +1600,9 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
        css_put(&parent->css);
        /* uncharge if move fails */
        if (!mem_cgroup_is_root(parent)) {
-               res_counter_uncharge(&parent->res, PAGE_SIZE, NULL);
+               res_counter_uncharge(&parent->res, PAGE_SIZE);
                if (do_swap_account)
-                       res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&parent->memsw, PAGE_SIZE);
        }
        return ret;
 }
@@ -1804,8 +1793,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                 * calling css_tryget
                 */
                if (!mem_cgroup_is_root(memcg))
-                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE,
-                                               NULL);
+                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                mem_cgroup_swap_statistics(memcg, false);
                mem_cgroup_put(memcg);
        }
@@ -1832,9 +1820,9 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
        if (!mem)
                return;
        if (!mem_cgroup_is_root(mem)) {
-               res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
                if (do_swap_account)
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
        }
        css_put(&mem->css);
 }
@@ -1849,7 +1837,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
        struct mem_cgroup_per_zone *mz;
-       bool soft_limit_excess = false;
 
        if (mem_cgroup_disabled())
                return NULL;
@@ -1889,10 +1876,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        }
 
        if (!mem_cgroup_is_root(mem)) {
-               res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess);
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
                if (do_swap_account &&
                                (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
        }
        if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                mem_cgroup_swap_statistics(mem, true);
@@ -1909,7 +1896,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
 
-       if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
+       if (mem_cgroup_soft_limit_check(mem))
                mem_cgroup_update_tree(mem, page);
        /* at swapout, this memcg will be accessed to record to swap */
        if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
@@ -1987,7 +1974,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
         * This memcg can be obsolete one. We avoid calling css_tryget
         */
        if (!mem_cgroup_is_root(memcg))
-               res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
+               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
        mem_cgroup_swap_statistics(memcg, false);
        mem_cgroup_put(memcg);
 }