
Commit 91d4aa5
---
r: 260404
b: refs/heads/master
c: d38144b
h: refs/heads/master
v: v3
Michal Hocko authored and Linus Torvalds committed Jul 26, 2011
1 parent 895b498 commit 91d4aa5
Showing 2 changed files with 35 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: d1a05b6973c7cb33144fa965d73facc708ffc37d
refs/heads/master: d38144b7a5f8d0a5e05d549177191374c6911009
48 changes: 34 additions & 14 deletions trunk/mm/memcontrol.c
@@ -2154,19 +2154,14 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
}

/*
* Tries to drain stocked charges in other cpus. This function is asynchronous
* and just put a work per cpu for draining localy on each cpu. Caller can
* expects some charges will be back to res_counter later but cannot wait for
* it.
* Drains all per-CPU charge caches for given root_mem resp. subtree
* of the hierarchy under it. sync flag says whether we should block
* until the work is done.
*/
static void drain_all_stock_async(struct mem_cgroup *root_mem)
static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
{
int cpu, curcpu;
/*
* If someone calls draining, avoid adding more kworker runs.
*/
if (!mutex_trylock(&percpu_charge_mutex))
return;

/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
/*
@@ -2197,17 +2192,42 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
schedule_work_on(cpu, &stock->work);
}
}

if (!sync)
goto out;

for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
flush_work(&stock->work);
}
out:
put_online_cpus();
}

/*
* Tries to drain stocked charges in other cpus. This function is asynchronous
* and just put a work per cpu for draining localy on each cpu. Caller can
* expects some charges will be back to res_counter later but cannot wait for
* it.
*/
static void drain_all_stock_async(struct mem_cgroup *root_mem)
{
/*
* If someone calls draining, avoid adding more kworker runs.
*/
if (!mutex_trylock(&percpu_charge_mutex))
return;
drain_all_stock(root_mem, false);
mutex_unlock(&percpu_charge_mutex);
/* We don't wait for flush_work */
}

/* This is a synchronous drain interface. */
static void drain_all_stock_sync(void)
static void drain_all_stock_sync(struct mem_cgroup *root_mem)
{
/* called when force_empty is called */
mutex_lock(&percpu_charge_mutex);
schedule_on_each_cpu(drain_local_stock);
drain_all_stock(root_mem, true);
mutex_unlock(&percpu_charge_mutex);
}

@@ -3856,7 +3876,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
goto out;
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
drain_all_stock_sync();
drain_all_stock_sync(mem);
ret = 0;
mem_cgroup_start_move(mem);
for_each_node_state(node, N_HIGH_MEMORY) {
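Taken together, the patch leaves one worker routine, drain_all_stock(), that queues per-CPU drain work and optionally waits for it, while drain_all_stock_async() and drain_all_stock_sync() become thin wrappers that differ only in how they take percpu_charge_mutex and in the sync flag they pass. Below is a minimal userspace sketch of that pattern, not the kernel code: every name is invented, plain pthreads stand in for the kworker machinery, and the per-CPU charge caches are reduced to a small array.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

struct pcp_cache {
	pthread_t worker;
	int cached;      /* stand-in for the per-CPU charge cache */
	bool scheduled;  /* stand-in for FLUSHING_CACHED_CHARGE */
};

static struct pcp_cache caches[NCPU];
static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Worker body: drain one cache (the analogue of drain_local_stock). */
static void *drain_local(void *arg)
{
	struct pcp_cache *c = arg;

	printf("drained %d cached charges\n", c->cached);
	c->cached = 0;
	return NULL;
}

/* Common helper: queue a drain on every "CPU"; wait only if sync is set. */
static void drain_all(bool sync)
{
	int cpu;

	for (cpu = 0; cpu < NCPU; cpu++) {
		struct pcp_cache *c = &caches[cpu];

		if (c->scheduled)	/* drain already queued for this CPU */
			continue;
		c->scheduled = true;
		pthread_create(&c->worker, NULL, drain_local, c);
	}

	if (!sync)
		return;

	for (cpu = 0; cpu < NCPU; cpu++) {
		struct pcp_cache *c = &caches[cpu];

		if (c->scheduled) {
			/* the analogue of flush_work(); in the kernel the worker
			 * clears its flag itself, here the waiting path does, so
			 * this sketch relies on a sync drain happening eventually */
			pthread_join(c->worker, NULL);
			c->scheduled = false;
		}
	}
}

/* Async caller: best effort, bails out if a drain is already in flight. */
static void drain_all_async(void)
{
	if (pthread_mutex_trylock(&drain_mutex))
		return;
	drain_all(false);
	pthread_mutex_unlock(&drain_mutex);
	/* We do not wait for the workers. */
}

/* Sync caller: always runs and blocks until every worker has finished. */
static void drain_all_sync(void)
{
	pthread_mutex_lock(&drain_mutex);
	drain_all(true);
	pthread_mutex_unlock(&drain_mutex);
}

int main(void)
{
	for (int cpu = 0; cpu < NCPU; cpu++)
		caches[cpu].cached = cpu + 1;

	drain_all_async();	/* may return before any drain has run */
	drain_all_sync();	/* guaranteed to wait for completion */
	return 0;
}

The split mirrors the commit: the sync path takes the mutex unconditionally and flushes, while the async path only trylocks so that repeated reclaim-side calls do not pile up extra workers.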
