Commit

---
r: 187223
b: refs/heads/master
c: c62b1a3
h: refs/heads/master
i:
  187221: c7cf04f
  187219: c6e958c
  187215: e003ff9
v: v3
KAMEZAWA Hiroyuki authored and Linus Torvalds committed Mar 12, 2010
1 parent b6928b5 commit 61ec4b7
Showing 2 changed files with 64 additions and 122 deletions.
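
The mm/memcontrol.c diff below drops the memory controller's private per-CPU statistics array (struct mem_cgroup_stat, sized by nr_cpu_ids and indexed via get_cpu()/smp_processor_id()) and switches to the generic percpu allocator plus the this_cpu_*()/per_cpu() accessors. A minimal sketch of that pattern follows; the foo_* names, FOO_NSTATS, and counter indices are illustrative only, not symbols from the patch:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/types.h>

enum { FOO_STAT_A, FOO_STAT_B, FOO_NSTATS };	/* illustrative counter indices */

struct foo_stat_cpu {
	s64 count[FOO_NSTATS];
};

struct foo {
	struct foo_stat_cpu *stat;	/* one copy per CPU, from alloc_percpu() */
};

static int foo_init(struct foo *f)
{
	f->stat = alloc_percpu(struct foo_stat_cpu);
	return f->stat ? 0 : -ENOMEM;
}

static void foo_destroy(struct foo *f)
{
	free_percpu(f->stat);
}

/* preemption-safe update; no get_cpu()/put_cpu() bracketing needed */
static void foo_add(struct foo *f, int idx, int val)
{
	this_cpu_add(f->stat->count[idx], val);
}

/* fold the per-CPU copies into one value, as mem_cgroup_read_stat() does */
static s64 foo_read(struct foo *f, int idx)
{
	int cpu;
	s64 sum = 0;

	for_each_possible_cpu(cpu)
		sum += per_cpu(f->stat->count[idx], cpu);
	return sum;
}

With alloc_percpu() the per-CPU copies are laid out by the percpu allocator rather than appended to the parent structure, which is why the diff also deletes mem_cgroup_size() and allocates plain sizeof(struct mem_cgroup).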
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6a6135b64fda39d931a79090f4da37f1c6da4a8c
+refs/heads/master: c62b1a3b31b5e27a6c5c2e91cc5ce05fdb6344d0
184 changes: 63 additions & 121 deletions trunk/mm/memcontrol.c
@@ -89,54 +89,8 @@ enum mem_cgroup_stat_index {
 
 struct mem_cgroup_stat_cpu {
 	s64 count[MEM_CGROUP_STAT_NSTATS];
 } ____cacheline_aligned_in_smp;
 
-struct mem_cgroup_stat {
-	struct mem_cgroup_stat_cpu cpustat[0];
-};
-
-static inline void
-__mem_cgroup_stat_set_safe(struct mem_cgroup_stat_cpu *stat,
-				enum mem_cgroup_stat_index idx, s64 val)
-{
-	stat->count[idx] = val;
-}
-
-static inline s64
-__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
-				enum mem_cgroup_stat_index idx)
-{
-	return stat->count[idx];
-}
-
-/*
- * For accounting under irq disable, no need for increment preempt count.
- */
-static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
-		enum mem_cgroup_stat_index idx, int val)
-{
-	stat->count[idx] += val;
-}
-
-static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
-		enum mem_cgroup_stat_index idx)
-{
-	int cpu;
-	s64 ret = 0;
-	for_each_possible_cpu(cpu)
-		ret += stat->cpustat[cpu].count[idx];
-	return ret;
-}
-
-static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
-{
-	s64 ret;
-
-	ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
-	ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
-	return ret;
-}
-
 /*
  * per-zone information in memory controller.
  */
@@ -270,9 +224,9 @@ struct mem_cgroup {
 	unsigned long move_charge_at_immigrate;
 
 	/*
-	 * statistics. This must be placed at the end of memcg.
+	 * percpu counter.
 	 */
-	struct mem_cgroup_stat stat;
+	struct mem_cgroup_stat_cpu *stat;
 };
 
 /* Stuffs for move charges at task migration. */
@@ -441,19 +395,14 @@ mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
 static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
 {
 	bool ret = false;
-	int cpu;
 	s64 val;
-	struct mem_cgroup_stat_cpu *cpustat;
 
-	cpu = get_cpu();
-	cpustat = &mem->stat.cpustat[cpu];
-	val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_SOFTLIMIT);
+	val = this_cpu_read(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT]);
 	if (unlikely(val < 0)) {
-		__mem_cgroup_stat_set_safe(cpustat, MEM_CGROUP_STAT_SOFTLIMIT,
+		this_cpu_write(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT],
 				SOFTLIMIT_EVENTS_THRESH);
 		ret = true;
 	}
-	put_cpu();
 	return ret;
 }
 
@@ -549,44 +498,54 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 	return mz;
 }
 
+static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
+		enum mem_cgroup_stat_index idx)
+{
+	int cpu;
+	s64 val = 0;
+
+	for_each_possible_cpu(cpu)
+		val += per_cpu(mem->stat->count[idx], cpu);
+	return val;
+}
+
+static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
+{
+	s64 ret;
+
+	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
+	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
+	return ret;
+}
+
 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
 					 bool charge)
 {
 	int val = (charge) ? 1 : -1;
-	struct mem_cgroup_stat *stat = &mem->stat;
-	struct mem_cgroup_stat_cpu *cpustat;
-	int cpu = get_cpu();
-
-	cpustat = &stat->cpustat[cpu];
-	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val);
-	put_cpu();
+	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 					 struct page_cgroup *pc,
 					 bool charge)
 {
 	int val = (charge) ? 1 : -1;
-	struct mem_cgroup_stat *stat = &mem->stat;
-	struct mem_cgroup_stat_cpu *cpustat;
-	int cpu = get_cpu();
 
-	cpustat = &stat->cpustat[cpu];
+	preempt_disable();
+
 	if (PageCgroupCache(pc))
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
 	else
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
 
 	if (charge)
-		__mem_cgroup_stat_add_safe(cpustat,
-				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
+		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
 	else
-		__mem_cgroup_stat_add_safe(cpustat,
-				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
-	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SOFTLIMIT, -1);
-	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_THRESHOLDS, -1);
+		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+	__this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT]);
+	__this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_THRESHOLDS]);
 
-	put_cpu();
+	preempt_enable();
 }
 
 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
@@ -1244,7 +1203,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 				}
 			}
 		}
-		if (!mem_cgroup_local_usage(&victim->stat)) {
+		if (!mem_cgroup_local_usage(victim)) {
 			/* this cgroup's local usage == 0 */
 			css_put(&victim->css);
 			continue;
@@ -1310,9 +1269,6 @@ static void record_last_oom(struct mem_cgroup *mem)
 void mem_cgroup_update_file_mapped(struct page *page, int val)
 {
 	struct mem_cgroup *mem;
-	struct mem_cgroup_stat *stat;
-	struct mem_cgroup_stat_cpu *cpustat;
-	int cpu;
 	struct page_cgroup *pc;
 
 	pc = lookup_page_cgroup(page);
@@ -1328,13 +1284,10 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
 		goto done;
 
 	/*
-	 * Preemption is already disabled, we don't need get_cpu()
+	 * Preemption is already disabled. We can use __this_cpu_xxx
 	 */
-	cpu = smp_processor_id();
-	stat = &mem->stat;
-	cpustat = &stat->cpustat[cpu];
+	__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val);
 
-	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
 done:
 	unlock_page_cgroup(pc);
 }
@@ -1761,9 +1714,6 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
 	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
 {
 	struct page *page;
-	int cpu;
-	struct mem_cgroup_stat *stat;
-	struct mem_cgroup_stat_cpu *cpustat;
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON(PageLRU(pc->page));
@@ -1773,18 +1723,11 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
 
 	page = pc->page;
 	if (page_mapped(page) && !PageAnon(page)) {
-		cpu = smp_processor_id();
-		/* Update mapped_file data for mem_cgroup "from" */
-		stat = &from->stat;
-		cpustat = &stat->cpustat[cpu];
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
-						-1);
-
-		/* Update mapped_file data for mem_cgroup "to" */
-		stat = &to->stat;
-		cpustat = &stat->cpustat[cpu];
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
-						1);
+		/* Update mapped_file data for mem_cgroup */
+		preempt_disable();
+		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+		preempt_enable();
 	}
 	mem_cgroup_charge_statistics(from, pc, false);
 	if (uncharge)
@@ -2885,7 +2828,7 @@ static int
 mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
 {
 	struct mem_cgroup_idx_data *d = data;
-	d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
+	d->val += mem_cgroup_read_stat(mem, d->idx);
 	return 0;
 }
 
@@ -3134,18 +3077,18 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
 	s64 val;
 
 	/* per cpu stat */
-	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
 	s->stat[MCS_CACHE] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
 	s->stat[MCS_RSS] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
 	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
+	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
 	s->stat[MCS_PGPGIN] += val;
-	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
 	s->stat[MCS_PGPGOUT] += val;
 	if (do_swap_account) {
-		val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
+		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
 		s->stat[MCS_SWAP] += val * PAGE_SIZE;
 	}
 
@@ -3276,19 +3219,14 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
 static bool mem_cgroup_threshold_check(struct mem_cgroup *mem)
 {
 	bool ret = false;
-	int cpu;
 	s64 val;
-	struct mem_cgroup_stat_cpu *cpustat;
 
-	cpu = get_cpu();
-	cpustat = &mem->stat.cpustat[cpu];
-	val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_THRESHOLDS);
+	val = this_cpu_read(mem->stat->count[MEM_CGROUP_STAT_THRESHOLDS]);
 	if (unlikely(val < 0)) {
-		__mem_cgroup_stat_set_safe(cpustat, MEM_CGROUP_STAT_THRESHOLDS,
+		this_cpu_write(mem->stat->count[MEM_CGROUP_STAT_THRESHOLDS],
 				THRESHOLDS_EVENTS_THRESH);
 		ret = true;
 	}
-	put_cpu();
 	return ret;
 }
 
@@ -3676,24 +3614,27 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	kfree(mem->info.nodeinfo[node]);
 }
 
-static int mem_cgroup_size(void)
-{
-	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
-	return sizeof(struct mem_cgroup) + cpustat_size;
-}
 
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *mem;
-	int size = mem_cgroup_size();
+	int size = sizeof(struct mem_cgroup);
 
 	/* Can be very big if MAX_NUMNODES is very big */
 	if (size < PAGE_SIZE)
 		mem = kmalloc(size, GFP_KERNEL);
 	else
 		mem = vmalloc(size);
 
 	if (mem)
 		memset(mem, 0, size);
+	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+	if (!mem->stat) {
+		if (size < PAGE_SIZE)
+			kfree(mem);
+		else
+			vfree(mem);
+		mem = NULL;
+	}
 	return mem;
 }
@@ -3718,7 +3659,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 
-	if (mem_cgroup_size() < PAGE_SIZE)
+	free_percpu(mem->stat);
+	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
 		kfree(mem);
 	else
 		vfree(mem);
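One detail worth noting from the hunks above: the SOFTLIMIT and THRESHOLDS counters are used as per-CPU countdowns. mem_cgroup_charge_statistics() decrements them with __this_cpu_dec() on every charge/uncharge, and mem_cgroup_soft_limit_check()/mem_cgroup_threshold_check() only return true (and rearm the counter with this_cpu_write()) once the value goes negative, so the expensive soft-limit/threshold work runs roughly once per threshold-many events per CPU. A self-contained sketch of that pattern follows; FOO_EVENTS_THRESH and the foo_* names are made up for illustration and are not the memcg symbols:

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/types.h>

#define FOO_EVENTS_THRESH	1024	/* memcg defines its own *_EVENTS_THRESH values */

/* one countdown per CPU, decremented on every accounted event */
static DEFINE_PER_CPU(s64, foo_events);

/* caller runs with preemption disabled, matching the __this_cpu_*() usage above */
static void foo_count_event(void)
{
	__this_cpu_dec(foo_events);
}

static bool foo_event_check(void)
{
	s64 val = this_cpu_read(foo_events);

	if (unlikely(val < 0)) {
		/* rearm this CPU's countdown and tell the caller to do the real work */
		this_cpu_write(foo_events, FOO_EVENTS_THRESH);
		return true;
	}
	return false;
}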
