Skip to content

Commit

Permalink
memcg: unify memcg stat flushing
Browse files Browse the repository at this point in the history
The memcg stats can be flushed in multiple contexts, and potentially in
parallel too.  For example, multiple parallel user space readers of
memcg stats will contend on the rstat locks with each other.  There is
no need for that.  We just need one flusher and everyone else can
benefit.

In addition, after aa48e47 ("memcg: infrastructure to flush memcg
stats") the kernel periodically flushes the memcg stats from the root, so
the other flushers will potentially have much less work to do.

Link: https://lkml.kernel.org/r/20211001190040.48086-2-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: "Michal Koutný" <mkoutny@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Shakeel Butt authored and Linus Torvalds committed Nov 6, 2021
1 parent 11192d9 commit fd25a9e
Showing 1 changed file with 10 additions and 9 deletions.
19 changes: 10 additions & 9 deletions mm/memcontrol.c
Original file line number Diff line number Diff line change
Expand Up @@ -660,12 +660,14 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg)

/*
 * __mem_cgroup_flush_stats - flush memcg rstat counters from the root cgroup.
 *
 * Flushing may be requested from multiple contexts concurrently (e.g.
 * parallel userspace stat readers); only one flusher is needed at a time,
 * since a single root-level flush benefits everyone.  Contending callers
 * therefore just bail out via trylock instead of queuing on the lock.
 */
static void __mem_cgroup_flush_stats(void)
{
	unsigned long flag;

	/*
	 * Use the _irqsave trylock variant: this path can be reached from
	 * irq-safe contexts (NOTE(review): e.g. mem_cgroup_threshold(), per
	 * the surrounding callers — confirm against the full file), so plain
	 * spin_trylock() would be unsafe there.  Losing the race is fine;
	 * the winner flushes on our behalf.
	 */
	if (!spin_trylock_irqsave(&stats_flush_lock, flag))
		return;

	cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
	/* Reset the batching counter so updaters restart accumulation. */
	atomic_set(&stats_flush_threshold, 0);
	spin_unlock_irqrestore(&stats_flush_lock, flag);
}

void mem_cgroup_flush_stats(void)
Expand Down Expand Up @@ -1461,7 +1463,7 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
*
* Current memory state:
*/
cgroup_rstat_flush(memcg->css.cgroup);
mem_cgroup_flush_stats();

for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
u64 size;
Expand Down Expand Up @@ -3565,8 +3567,7 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
unsigned long val;

if (mem_cgroup_is_root(memcg)) {
/* mem_cgroup_threshold() calls here from irqsafe context */
cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
mem_cgroup_flush_stats();
val = memcg_page_state(memcg, NR_FILE_PAGES) +
memcg_page_state(memcg, NR_ANON_MAPPED);
if (swap)
Expand Down Expand Up @@ -3947,7 +3948,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
int nid;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

cgroup_rstat_flush(memcg->css.cgroup);
mem_cgroup_flush_stats();

for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
seq_printf(m, "%s=%lu", stat->name,
Expand Down Expand Up @@ -4019,7 +4020,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)

BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));

cgroup_rstat_flush(memcg->css.cgroup);
mem_cgroup_flush_stats();

for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
unsigned long nr;
Expand Down Expand Up @@ -4522,7 +4523,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent;

cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
mem_cgroup_flush_stats();

*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
Expand Down Expand Up @@ -6405,7 +6406,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v)
int i;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

cgroup_rstat_flush(memcg->css.cgroup);
mem_cgroup_flush_stats();

for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
int nid;
Expand Down

0 comments on commit fd25a9e

Please sign in to comment.