Skip to content

Commit

Permalink
mm: multi-gen LRU: remove eviction fairness safeguard
Browse files Browse the repository at this point in the history
Recall that the eviction consumes the oldest generation: first it
bucket-sorts folios whose gen counters were updated by the aging and
reclaims the rest; then it increments lrugen->min_seq.

The current eviction fairness safeguard for global reclaim has a
dilemma: when there are multiple eligible memcgs, should it continue
or stop upon meeting the reclaim goal? If it continues, it overshoots
and increases direct reclaim latency; if it stops, it loses fairness
between memcgs it has taken memory away from and those it has yet to.

With memcg LRU, the eviction, while ensuring eventual fairness, will
stop upon meeting its goal. Therefore the current eviction fairness
safeguard for global reclaim will not be needed.

Note that memcg LRU only applies to global reclaim. For memcg reclaim,
the eviction will continue, even if it is overshooting. This becomes
unconditional due to code simplification.

Link: https://lkml.kernel.org/r/20221222041905.2431096-4-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  • Loading branch information
Yu Zhao authored and Andrew Morton committed Jan 19, 2023
1 parent 6df1b22 commit a579086
Showing 1 changed file with 23 additions and 58 deletions.
81 changes: 23 additions & 58 deletions mm/vmscan.c
Original file line number Diff line number Diff line change
Expand Up @@ -449,6 +449,11 @@ static bool cgroup_reclaim(struct scan_control *sc)
return sc->target_mem_cgroup;
}

/*
 * global_reclaim - is this scan reclaiming on behalf of the whole system?
 *
 * True when there is no target memcg, or when the target is the root
 * memcg (which covers all memory, so it is equivalent to global reclaim).
 * NOTE(review): appears to be the CONFIG_MEMCG variant — the #ifdef
 * context is outside this diff hunk; confirm against the full file.
 */
static bool global_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
}

/**
* writeback_throttling_sane - is the usual dirty throttling mechanism available?
* @sc: scan_control in question
Expand Down Expand Up @@ -499,6 +504,11 @@ static bool cgroup_reclaim(struct scan_control *sc)
return false;
}

/*
 * global_reclaim - stub that reports all reclaim as global.
 *
 * NOTE(review): presumably the !CONFIG_MEMCG fallback (the enclosing
 * #else is outside this diff hunk — confirm): without memcgs there is
 * no target_mem_cgroup, so every reclaim pass is global by definition.
 */
static bool global_reclaim(struct scan_control *sc)
{
return true;
}

static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
Expand Down Expand Up @@ -5006,8 +5016,7 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw
return scanned;
}

static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
bool *need_swapping)
static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
{
int type;
int scanned;
Expand Down Expand Up @@ -5096,9 +5105,6 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
goto retry;
}

if (need_swapping && type == LRU_GEN_ANON)
*need_swapping = true;

return scanned;
}

Expand Down Expand Up @@ -5138,67 +5144,26 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *
return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
}

static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
struct scan_control *sc, bool need_swapping)
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
{
int i;
DEFINE_MAX_SEQ(lruvec);

if (!current_is_kswapd()) {
/* age each memcg at most once to ensure fairness */
if (max_seq - seq > 1)
return true;

/* over-swapping can increase allocation latency */
if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
return true;

/* give this thread a chance to exit and free its memory */
if (fatal_signal_pending(current)) {
sc->nr_reclaimed += MIN_LRU_BATCH;
return true;
}

if (cgroup_reclaim(sc))
return false;
} else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
return false;

/* keep scanning at low priorities to ensure fairness */
if (sc->priority > DEF_PRIORITY - 2)
return false;

/*
* A minimum amount of work was done under global memory pressure. For
* kswapd, it may be overshooting. For direct reclaim, the allocation
* may succeed if all suitable zones are somewhat safe. In either case,
* it's better to stop now, and restart later if necessary.
*/
for (i = 0; i <= sc->reclaim_idx; i++) {
unsigned long wmark;
struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;

if (!managed_zone(zone))
continue;

wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
if (wmark > zone_page_state(zone, NR_FREE_PAGES))
return false;
}
/* don't abort memcg reclaim to ensure fairness */
if (!global_reclaim(sc))
return -1;

sc->nr_reclaimed += MIN_LRU_BATCH;
/* discount the previous progress for kswapd */
if (current_is_kswapd())
return sc->nr_to_reclaim + sc->last_reclaimed;

return true;
return max(sc->nr_to_reclaim, compact_gap(sc->order));
}

static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
struct blk_plug plug;
bool need_aging = false;
bool need_swapping = false;
unsigned long scanned = 0;
unsigned long reclaimed = sc->nr_reclaimed;
DEFINE_MAX_SEQ(lruvec);
unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

lru_add_drain();

Expand All @@ -5222,15 +5187,15 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
if (!nr_to_scan)
goto done;

delta = evict_folios(lruvec, sc, swappiness, &need_swapping);
delta = evict_folios(lruvec, sc, swappiness);
if (!delta)
goto done;

scanned += delta;
if (scanned >= nr_to_scan)
break;

if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
if (sc->nr_reclaimed >= nr_to_reclaim)
break;

cond_resched();
Expand Down Expand Up @@ -5677,7 +5642,7 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co
if (sc->nr_reclaimed >= nr_to_reclaim)
return 0;

if (!evict_folios(lruvec, sc, swappiness, NULL))
if (!evict_folios(lruvec, sc, swappiness))
return 0;

cond_resched();
Expand Down

0 comments on commit a579086

Please sign in to comment.