mirror of https://github.com/openwrt/openwrt.git
synced 2024-12-28 09:39:00 +00:00
68426e54ed

All patches automatically rebased.

Build system: x86_64
Build-tested: bcm2711/RPi4B, filogic/xiaomi_redmi-router-ax6000-stock
Run-tested: bcm2711/RPi4B, filogic/xiaomi_redmi-router-ax6000-stock

Signed-off-by: John Audia <therealgraysky@proton.me>
225 lines
7.0 KiB
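The patch below is pure code motion: the anon/file scan-balance setup that used to sit inline in shrink_node() moves into a new helper, prepare_scan_count(), which shrink_node() then calls once per reclaim pass before shrink_node_memcgs(). A minimal standalone sketch of the resulting shape (toy userspace types and stub bodies for illustration only; just the function names and call order come from the diff):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types; fields limited to what the patch touches. */
struct scan_control {
        int priority;
        bool cache_trim_mode;
        bool file_is_tiny;
};

struct node {                           /* stands in for pg_data_t */
        int node_id;
};

/* After the patch, every anon/file scan-balance decision is made here. */
static void prepare_scan_count(struct node *pgdat, struct scan_control *sc)
{
        (void)pgdat;
        sc->cache_trim_mode = false;    /* placeholder for the real heuristics */
        sc->file_is_tiny = false;
}

/* Per-cgroup reclaim; untouched by the patch. */
static void shrink_node_memcgs(struct node *pgdat, struct scan_control *sc)
{
        (void)pgdat;
        (void)sc;
}

/* shrink_node() is reduced to its bookkeeping plus these two calls. */
static void shrink_node(struct node *pgdat, struct scan_control *sc)
{
        prepare_scan_count(pgdat, sc);
        shrink_node_memcgs(pgdat, sc);
}

int main(void)
{
        struct node pgdat = { .node_id = 0 };
        struct scan_control sc = { .priority = 12 };

        shrink_node(&pgdat, &sc);
        printf("cache_trim_mode=%d file_is_tiny=%d\n",
               sc.cache_trim_mode, sc.file_is_tiny);
        return 0;
}

Nothing in the heuristics themselves changes; they are only relocated, which is what keeps the upcoming mm/vmscan.c changes in this series easier to read.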
From a810f8e2f1bdd0707eaf05c8b4ba84a3ff2801bd Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Sun, 27 Sep 2020 20:49:08 -0600
Subject: [PATCH 03/10] mm/vmscan.c: refactor shrink_node()

This patch refactors shrink_node(). This will make the upcoming
changes to mm/vmscan.c more readable.

Signed-off-by: Yu Zhao <yuzhao@google.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
Change-Id: Iae734b5b4030205b7db6e8c841f747b6f6ae1a04
---
 mm/vmscan.c | 186 +++++++++++++++++++++++++++-------------------------
 1 file changed, 98 insertions(+), 88 deletions(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2497,6 +2497,103 @@ enum scan_balance {
         SCAN_FILE,
 };
 
+static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
+{
+        unsigned long file;
+        struct lruvec *target_lruvec;
+
+        target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
+        /*
+         * Determine the scan balance between anon and file LRUs.
+         */
+        spin_lock_irq(&target_lruvec->lru_lock);
+        sc->anon_cost = target_lruvec->anon_cost;
+        sc->file_cost = target_lruvec->file_cost;
+        spin_unlock_irq(&target_lruvec->lru_lock);
+
+        /*
+         * Target desirable inactive:active list ratios for the anon
+         * and file LRU lists.
+         */
+        if (!sc->force_deactivate) {
+                unsigned long refaults;
+
+                refaults = lruvec_page_state(target_lruvec,
+                                WORKINGSET_ACTIVATE_ANON);
+                if (refaults != target_lruvec->refaults[0] ||
+                    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+                        sc->may_deactivate |= DEACTIVATE_ANON;
+                else
+                        sc->may_deactivate &= ~DEACTIVATE_ANON;
+
+                /*
+                 * When refaults are being observed, it means a new
+                 * workingset is being established. Deactivate to get
+                 * rid of any stale active pages quickly.
+                 */
+                refaults = lruvec_page_state(target_lruvec,
+                                WORKINGSET_ACTIVATE_FILE);
+                if (refaults != target_lruvec->refaults[1] ||
+                    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+                        sc->may_deactivate |= DEACTIVATE_FILE;
+                else
+                        sc->may_deactivate &= ~DEACTIVATE_FILE;
+        } else
+                sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
+
+        /*
+         * If we have plenty of inactive file pages that aren't
+         * thrashing, try to reclaim those first before touching
+         * anonymous pages.
+         */
+        file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+        if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+                sc->cache_trim_mode = 1;
+        else
+                sc->cache_trim_mode = 0;
+
+        /*
+         * Prevent the reclaimer from falling into the cache trap: as
+         * cache pages start out inactive, every cache fault will tip
+         * the scan balance towards the file LRU. And as the file LRU
+         * shrinks, so does the window for rotation from references.
+         * This means we have a runaway feedback loop where a tiny
+         * thrashing file LRU becomes infinitely more attractive than
+         * anon pages. Try to detect this based on file LRU size.
+         */
+        if (!cgroup_reclaim(sc)) {
+                unsigned long total_high_wmark = 0;
+                unsigned long free, anon;
+                int z;
+
+                free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+                file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+                        node_page_state(pgdat, NR_INACTIVE_FILE);
+
+                for (z = 0; z < MAX_NR_ZONES; z++) {
+                        struct zone *zone = &pgdat->node_zones[z];
+
+                        if (!managed_zone(zone))
+                                continue;
+
+                        total_high_wmark += high_wmark_pages(zone);
+                }
+
+                /*
+                 * Consider anon: if that's low too, this isn't a
+                 * runaway file reclaim problem, but rather just
+                 * extreme pressure. Reclaim as per usual then.
+                 */
+                anon = node_page_state(pgdat, NR_INACTIVE_ANON);
+
+                sc->file_is_tiny =
+                        file + free <= total_high_wmark &&
+                        !(sc->may_deactivate & DEACTIVATE_ANON) &&
+                        anon >> sc->priority;
+        }
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned. The relative value of each set of LRU lists is determined
@@ -2965,7 +3062,6 @@ static void shrink_node(pg_data_t *pgdat
         unsigned long nr_reclaimed, nr_scanned;
         struct lruvec *target_lruvec;
         bool reclaimable = false;
-        unsigned long file;
 
         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
 
@@ -2981,93 +3077,7 @@ again:
         nr_reclaimed = sc->nr_reclaimed;
         nr_scanned = sc->nr_scanned;
 
-        /*
-         * Determine the scan balance between anon and file LRUs.
-         */
-        spin_lock_irq(&target_lruvec->lru_lock);
-        sc->anon_cost = target_lruvec->anon_cost;
-        sc->file_cost = target_lruvec->file_cost;
-        spin_unlock_irq(&target_lruvec->lru_lock);
-
-        /*
-         * Target desirable inactive:active list ratios for the anon
-         * and file LRU lists.
-         */
-        if (!sc->force_deactivate) {
-                unsigned long refaults;
-
-                refaults = lruvec_page_state(target_lruvec,
-                                WORKINGSET_ACTIVATE_ANON);
-                if (refaults != target_lruvec->refaults[0] ||
-                    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
-                        sc->may_deactivate |= DEACTIVATE_ANON;
-                else
-                        sc->may_deactivate &= ~DEACTIVATE_ANON;
-
-                /*
-                 * When refaults are being observed, it means a new
-                 * workingset is being established. Deactivate to get
-                 * rid of any stale active pages quickly.
-                 */
-                refaults = lruvec_page_state(target_lruvec,
-                                WORKINGSET_ACTIVATE_FILE);
-                if (refaults != target_lruvec->refaults[1] ||
-                    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
-                        sc->may_deactivate |= DEACTIVATE_FILE;
-                else
-                        sc->may_deactivate &= ~DEACTIVATE_FILE;
-        } else
-                sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
-
-        /*
-         * If we have plenty of inactive file pages that aren't
-         * thrashing, try to reclaim those first before touching
-         * anonymous pages.
-         */
-        file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
-        if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
-                sc->cache_trim_mode = 1;
-        else
-                sc->cache_trim_mode = 0;
-
-        /*
-         * Prevent the reclaimer from falling into the cache trap: as
-         * cache pages start out inactive, every cache fault will tip
-         * the scan balance towards the file LRU. And as the file LRU
-         * shrinks, so does the window for rotation from references.
-         * This means we have a runaway feedback loop where a tiny
-         * thrashing file LRU becomes infinitely more attractive than
-         * anon pages. Try to detect this based on file LRU size.
-         */
-        if (!cgroup_reclaim(sc)) {
-                unsigned long total_high_wmark = 0;
-                unsigned long free, anon;
-                int z;
-
-                free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-                file = node_page_state(pgdat, NR_ACTIVE_FILE) +
-                        node_page_state(pgdat, NR_INACTIVE_FILE);
-
-                for (z = 0; z < MAX_NR_ZONES; z++) {
-                        struct zone *zone = &pgdat->node_zones[z];
-                        if (!managed_zone(zone))
-                                continue;
-
-                        total_high_wmark += high_wmark_pages(zone);
-                }
-
-                /*
-                 * Consider anon: if that's low too, this isn't a
-                 * runaway file reclaim problem, but rather just
-                 * extreme pressure. Reclaim as per usual then.
-                 */
-                anon = node_page_state(pgdat, NR_INACTIVE_ANON);
-
-                sc->file_is_tiny =
-                        file + free <= total_high_wmark &&
-                        !(sc->may_deactivate & DEACTIVATE_ANON) &&
-                        anon >> sc->priority;
-        }
+        prepare_scan_count(pgdat, sc);
 
         shrink_node_memcgs(pgdat, sc);
 
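Two of the relocated heuristics hinge on right-shifting a page count by the reclaim priority: file >> sc->priority gates cache_trim_mode, and anon >> sc->priority feeds sc->file_is_tiny. The standalone toy below shows what those shifts amount to; the numbers are made up and the helper names are invented for the example, but the conditions mirror the hunk above.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the two size checks in prepare_scan_count():
 *  - cache_trim_mode: there are "enough" non-thrashing inactive file
 *    pages, where "enough" means file >> priority is non-zero, i.e.
 *    at least 2^priority pages, and file pages are not being deactivated.
 *  - file_is_tiny: file plus free pages fit under the node's high
 *    watermarks, anon is not being deactivated, and there is a
 *    meaningful amount of inactive anon (anon >> priority non-zero).
 */
static bool cache_trim_mode(unsigned long inactive_file, int priority,
                            bool deactivate_file)
{
        return (inactive_file >> priority) && !deactivate_file;
}

static bool file_is_tiny(unsigned long file, unsigned long free,
                         unsigned long total_high_wmark,
                         unsigned long inactive_anon, int priority,
                         bool deactivate_anon)
{
        return file + free <= total_high_wmark &&
               !deactivate_anon &&
               (inactive_anon >> priority);
}

int main(void)
{
        /* Reclaim starts at priority 12 (DEF_PRIORITY), so the inactive
         * file list must hold at least 2^12 = 4096 pages to trigger
         * cache_trim_mode. */
        printf("trim, prio 12, 8192 file pages: %d\n",
               cache_trim_mode(8192, 12, false));
        printf("trim, prio 12, 1024 file pages: %d\n",
               cache_trim_mode(1024, 12, false));
        printf("file_is_tiny: %d\n",
               file_is_tiny(2000, 1000, 4000, 100000, 12, false));
        return 0;
}

Each drop in priority halves those thresholds, so as reclaim pressure mounts, progressively smaller file and anon lists still satisfy the checks.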