mm: memcontrol: move stat/event counting functions out-of-line
These are getting too big to be inlined in every callsite.  They were
stolen from vmstat.c, which already out-of-lines them, and they have
only been growing since.  The callsites aren't that hot, either.

Move __mod_memcg_state()
     __mod_lruvec_state() and
     __count_memcg_events() out of line and add kerneldoc comments.
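For context, the irq-disabling wrappers such as mod_memcg_state() stay static inline in memcontrol.h and simply forward to the out-of-line workers, so only the heavyweight batching bodies leave the header. A simplified sketch of that pre-existing wrapper pattern (illustration only, not part of this patch):

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	/* Cheap inline wrapper: disable irqs, then call the batching
	 * worker, which after this patch lives in mm/memcontrol.c. */
	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}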

Link: http://lkml.kernel.org/r/20190412151507.2769-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Johannes Weiner authored and Linus Torvalds committed May 15, 2019
1 parent 205b20c commit db9adbc
Showing 2 changed files with 84 additions and 57 deletions.
62 changes: 5 additions & 57 deletions include/linux/memcontrol.h
@@ -565,22 +565,7 @@ static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
{
long x;

if (mem_cgroup_disabled())
return;

x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
atomic_long_add(x, &memcg->vmstats[idx]);
x = 0;
}
__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
@@ -642,31 +627,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
struct mem_cgroup_per_node *pn;
long x;

/* Update node */
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

if (mem_cgroup_disabled())
return;

pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

/* Update memcg */
__mod_memcg_state(pn->memcg, idx, val);

/* Update lruvec */
x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
atomic_long_add(x, &pn->lruvec_stat[idx]);
x = 0;
}
__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
@@ -708,22 +670,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
unsigned long x;

if (mem_cgroup_disabled())
return;

x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
if (unlikely(x > MEMCG_CHARGE_BATCH)) {
atomic_long_add(x, &memcg->vmevents[idx]);
x = 0;
}
__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
79 changes: 79 additions & 0 deletions mm/memcontrol.c
@@ -687,6 +687,85 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
return mz;
}

/**
* __mod_memcg_state - update cgroup memory statistics
* @memcg: the memory cgroup
* @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
* @val: delta to add to the counter, can be negative
*/
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
long x;

if (mem_cgroup_disabled())
return;

x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
atomic_long_add(x, &memcg->vmstats[idx]);
x = 0;
}
__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

/**
* __mod_lruvec_state - update lruvec memory statistics
* @lruvec: the lruvec
* @idx: the stat item
* @val: delta to add to the counter, can be negative
*
* The lruvec is the intersection of the NUMA node and a cgroup. This
* function updates all three counters that are affected by a
* change of state at this level: per-node, per-cgroup, per-lruvec.
*/
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val)
{
struct mem_cgroup_per_node *pn;
long x;

/* Update node */
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

if (mem_cgroup_disabled())
return;

pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

/* Update memcg */
__mod_memcg_state(pn->memcg, idx, val);

/* Update lruvec */
x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
atomic_long_add(x, &pn->lruvec_stat[idx]);
x = 0;
}
__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
* __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup
* @idx: the event item
* @count: the number of events that occurred
*/
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
unsigned long count)
{
unsigned long x;

if (mem_cgroup_disabled())
return;

x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
if (unlikely(x > MEMCG_CHARGE_BATCH)) {
atomic_long_add(x, &memcg->vmevents[idx]);
x = 0;
}
__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg,
int event)
{
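To make the batching scheme shared by all three moved functions concrete, here is a small userspace sketch of the same flush-when-over-batch idea (plain C11, not kernel code; BATCH, mod_state(), global_counter and percpu_counter are made-up stand-ins for MEMCG_CHARGE_BATCH, the out-of-line helpers, memcg->vmstats[] and the per-cpu counters):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH 32	/* stand-in for MEMCG_CHARGE_BATCH */

static atomic_long global_counter;		/* like memcg->vmstats[idx] */
static _Thread_local long percpu_counter;	/* like vmstats_percpu->stat[idx] */

static void mod_state(long val)
{
	long x = percpu_counter + val;

	if (labs(x) > BATCH) {
		/* Fold the whole batch into the shared counter at once. */
		atomic_fetch_add(&global_counter, x);
		x = 0;
	}
	/* Keep the (small) remainder in the cheap local counter. */
	percpu_counter = x;
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_state(1);

	/* Most updates were flushed in batches; a remainder stays local. */
	printf("global=%ld local=%ld\n",
	       (long)atomic_load(&global_counter), percpu_counter);
	return 0;
}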
