From 14f9a7a15f3d1af351f30e0438fd747b7ac253b0 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:01 -0700
Subject: [PATCH 03/19] UPSTREAM: mm: multi-gen LRU: remove eviction fairness
 safeguard

Recall that the eviction consumes the oldest generation: first it
bucket-sorts folios whose gen counters were updated by the aging and
reclaims the rest; then it increments lrugen->min_seq.
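
For illustration only, here is a compilable userspace sketch of that
step, not the kernel code: folio_stub, evict_oldest and min_seq below
are made-up stand-ins for struct folio, the eviction loop and
lrugen->min_seq.

#include <stdbool.h>
#include <stdio.h>

struct folio_stub {
	unsigned long gen;	/* gen counter, bumped by the aging */
	bool reclaimed;
};

static unsigned long min_seq;	/* plays the role of lrugen->min_seq */

/* Consume the oldest generation, then advance min_seq. */
static void evict_oldest(struct folio_stub *folios, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (folios[i].gen > min_seq)
			continue;	/* aged into a younger bucket: keep */
		folios[i].reclaimed = true;	/* still oldest: reclaim */
	}
	min_seq++;	/* the oldest generation has been consumed */
}

int main(void)
{
	struct folio_stub folios[] = { { .gen = 0 }, { .gen = 2 }, { .gen = 0 } };

	evict_oldest(folios, 3);
	printf("min_seq=%lu reclaimed=%d,%d,%d\n", min_seq,
	       folios[0].reclaimed, folios[1].reclaimed, folios[2].reclaimed);
	return 0;
}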

The current eviction fairness safeguard for global reclaim has a
dilemma: when there are multiple eligible memcgs, should it continue
or stop upon meeting the reclaim goal? If it continues, it overshoots
and increases direct reclaim latency; if it stops, it loses fairness
between memcgs it has taken memory away from and those it has yet to.

With memcg LRU, the eviction, while ensuring eventual fairness, will
stop upon meeting its goal. Therefore the current eviction fairness
safeguard for global reclaim will not be needed.

Note that memcg LRU only applies to global reclaim. For memcg reclaim,
the eviction will continue, even if it is overshooting. This becomes
unconditional due to code simplification.
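
The "unconditional" part falls out of unsigned arithmetic: for memcg
reclaim, get_nr_to_reclaim() below returns -1, which becomes ULONG_MAX
in an unsigned long, so the caller's "sc->nr_reclaimed >= nr_to_reclaim"
check never fires. A standalone sketch of just that wraparound (not
kernel code; the values are arbitrary):

#include <stdio.h>

int main(void)
{
	/* what get_nr_to_reclaim() returns for memcg reclaim */
	unsigned long nr_to_reclaim = -1;	/* wraps to ULONG_MAX */
	unsigned long nr_reclaimed = 1UL << 40;	/* arbitrarily large progress */

	/* the stop check cannot fire short of ULONG_MAX reclaimed pages */
	printf("stop? %d\n", nr_reclaimed >= nr_to_reclaim);	/* prints 0 */
	return 0;
}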

Link: https://lkml.kernel.org/r/20221222041905.2431096-4-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Bug: 274865848
(cherry picked from commit a579086c99ed70cc4bfc104348dbe3dd8f2787e6)
Change-Id: I08ac1b3c90e29cafd0566785aaa4bcdb5db7d22c
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 mm/vmscan.c | 81 +++++++++++++++--------------------------------------
 1 file changed, 23 insertions(+), 58 deletions(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -448,6 +448,11 @@ static bool cgroup_reclaim(struct scan_c
 	return sc->target_mem_cgroup;
 }
 
+static bool global_reclaim(struct scan_control *sc)
+{
+	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
+}
+
 /**
  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
  * @sc: scan_control in question
@@ -498,6 +503,11 @@ static bool cgroup_reclaim(struct scan_c
 	return false;
 }
 
+static bool global_reclaim(struct scan_control *sc)
+{
+	return true;
+}
+
 static bool writeback_throttling_sane(struct scan_control *sc)
 {
 	return true;
@@ -4993,8 +5003,7 @@ static int isolate_folios(struct lruvec
 	return scanned;
 }
 
-static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
-			bool *need_swapping)
+static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
 {
 	int type;
 	int scanned;
@@ -5083,9 +5092,6 @@ retry:
 		goto retry;
 	}
 
-	if (need_swapping && type == LRU_GEN_ANON)
-		*need_swapping = true;
-
 	return scanned;
 }
 
@@ -5124,67 +5130,26 @@ done:
 	return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
 }
 
-static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
-			      struct scan_control *sc, bool need_swapping)
+static unsigned long get_nr_to_reclaim(struct scan_control *sc)
 {
-	int i;
-	DEFINE_MAX_SEQ(lruvec);
-
-	if (!current_is_kswapd()) {
-		/* age each memcg at most once to ensure fairness */
-		if (max_seq - seq > 1)
-			return true;
-
-		/* over-swapping can increase allocation latency */
-		if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
-			return true;
-
-		/* give this thread a chance to exit and free its memory */
-		if (fatal_signal_pending(current)) {
-			sc->nr_reclaimed += MIN_LRU_BATCH;
-			return true;
-		}
-
-		if (cgroup_reclaim(sc))
-			return false;
-	} else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
-		return false;
-
-	/* keep scanning at low priorities to ensure fairness */
-	if (sc->priority > DEF_PRIORITY - 2)
-		return false;
-
-	/*
-	 * A minimum amount of work was done under global memory pressure. For
-	 * kswapd, it may be overshooting. For direct reclaim, the allocation
-	 * may succeed if all suitable zones are somewhat safe. In either case,
-	 * it's better to stop now, and restart later if necessary.
-	 */
-	for (i = 0; i <= sc->reclaim_idx; i++) {
-		unsigned long wmark;
-		struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
-
-		if (!managed_zone(zone))
-			continue;
-
-		wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
-		if (wmark > zone_page_state(zone, NR_FREE_PAGES))
-			return false;
-	}
+	/* don't abort memcg reclaim to ensure fairness */
+	if (!global_reclaim(sc))
+		return -1;
 
-	sc->nr_reclaimed += MIN_LRU_BATCH;
+	/* discount the previous progress for kswapd */
+	if (current_is_kswapd())
+		return sc->nr_to_reclaim + sc->last_reclaimed;
 
-	return true;
+	return max(sc->nr_to_reclaim, compact_gap(sc->order));
 }
 
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	struct blk_plug plug;
 	bool need_aging = false;
-	bool need_swapping = false;
 	unsigned long scanned = 0;
 	unsigned long reclaimed = sc->nr_reclaimed;
-	DEFINE_MAX_SEQ(lruvec);
+	unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
 
 	lru_add_drain();
 
@@ -5208,7 +5173,7 @@ static void lru_gen_shrink_lruvec(struct
 		if (!nr_to_scan)
 			goto done;
 
-		delta = evict_folios(lruvec, sc, swappiness, &need_swapping);
+		delta = evict_folios(lruvec, sc, swappiness);
 		if (!delta)
 			goto done;
 
@@ -5216,7 +5181,7 @@ static void lru_gen_shrink_lruvec(struct
 		if (scanned >= nr_to_scan)
 			break;
 
-		if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
+		if (sc->nr_reclaimed >= nr_to_reclaim)
 			break;
 
 		cond_resched();
@@ -5666,7 +5631,7 @@ static int run_eviction(struct lruvec *l
 		if (sc->nr_reclaimed >= nr_to_reclaim)
 			return 0;
 
-		if (!evict_folios(lruvec, sc, swappiness, NULL))
+		if (!evict_folios(lruvec, sc, swappiness))
 			return 0;
 
 		cond_resched();