mm: vmscan: split shrink_node() into node part and memcgs part
This function is getting long and unwieldy; split out the memcg bits.

The updated shrink_node() handles the generic (node) reclaim aspects:
  - global vmpressure notifications
  - writeback and congestion throttling
  - reclaim/compaction management
  - kswapd giving up on unreclaimable nodes

It then calls the new shrink_node_memcgs(), which handles the cgroup specifics (see the sketch after this list):
  - the cgroup tree traversal
  - memory.low considerations
  - per-cgroup slab shrinking callbacks
  - per-cgroup vmpressure notifications
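
For orientation, here is a condensed sketch of the resulting shape of the
two functions. It is distilled from the diff below, with the loop and
function bodies elided down to comments; the actual code is in the diff:

	static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
	{
	        struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
	        struct mem_cgroup *memcg;

	        /* Walk the cgroup tree below the reclaim target. */
	        memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
	        do {
	                /* memory.low handling, per-cgroup LRU and slab
	                 * shrinking, per-cgroup vmpressure (elided). */
	        } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
	}

	static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
	{
	        bool reclaimable = false;

	        /* ... node-level bookkeeping (elided) ... */

	        shrink_node_memcgs(pgdat, sc);

	        /* Global vmpressure, writeback/congestion throttling,
	         * reclaim/compaction, kswapd giving up on unreclaimable
	         * nodes (elided). */
	        return reclaimable;
	}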

[hannes@cmpxchg.org: rename "root" to "target_memcg", per Roman]
  Link: http://lkml.kernel.org/r/20191025143640.GA386981@cmpxchg.org
Link: http://lkml.kernel.org/r/20191022144803.302233-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Johannes Weiner authored and Linus Torvalds committed Dec 1, 2019
commit 0f6a5cf (parent afaf07a)
Showing 1 changed file with 25 additions and 16 deletions.

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2722,26 +2722,18 @@ static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
                 (memcg && memcg_congested(pgdat, memcg));
 }
 
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 {
-        struct reclaim_state *reclaim_state = current->reclaim_state;
-        struct mem_cgroup *root = sc->target_mem_cgroup;
-        unsigned long nr_reclaimed, nr_scanned;
-        bool reclaimable = false;
+        struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
         struct mem_cgroup *memcg;
-again:
-        memset(&sc->nr, 0, sizeof(sc->nr));
-
-        nr_reclaimed = sc->nr_reclaimed;
-        nr_scanned = sc->nr_scanned;
 
-        memcg = mem_cgroup_iter(root, NULL, NULL);
+        memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
         do {
                 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
                 unsigned long reclaimed;
                 unsigned long scanned;
 
-                switch (mem_cgroup_protected(root, memcg)) {
+                switch (mem_cgroup_protected(target_memcg, memcg)) {
                 case MEMCG_PROT_MIN:
                         /*
                          * Hard protection.
@@ -2785,15 +2777,31 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                            sc->nr_scanned - scanned,
                            sc->nr_reclaimed - reclaimed);
 
-        } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
+        } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
+}
+
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+{
+        struct reclaim_state *reclaim_state = current->reclaim_state;
+        struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
+        unsigned long nr_reclaimed, nr_scanned;
+        bool reclaimable = false;
+
+again:
+        memset(&sc->nr, 0, sizeof(sc->nr));
+
+        nr_reclaimed = sc->nr_reclaimed;
+        nr_scanned = sc->nr_scanned;
+
+        shrink_node_memcgs(pgdat, sc);
 
         if (reclaim_state) {
                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                 reclaim_state->reclaimed_slab = 0;
         }
 
         /* Record the subtree's reclaim efficiency */
-        vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+        vmpressure(sc->gfp_mask, target_memcg, true,
                    sc->nr_scanned - nr_scanned,
                    sc->nr_reclaimed - nr_reclaimed);
 
@@ -2849,7 +2857,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
          */
         if (cgroup_reclaim(sc) && writeback_throttling_sane(sc) &&
             sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
-                set_memcg_congestion(pgdat, root, true);
+                set_memcg_congestion(pgdat, target_memcg, true);
 
         /*
          * Stall direct reclaim for IO completions if underlying BDIs
@@ -2858,7 +2866,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
          * the LRU too quickly.
          */
         if (!sc->hibernation_mode && !current_is_kswapd() &&
-            current_may_throttle() && pgdat_memcg_congested(pgdat, root))
+            current_may_throttle() &&
+            pgdat_memcg_congested(pgdat, target_memcg))
                 wait_iff_congested(BLK_RW_ASYNC, HZ/10);
 
         if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
