From 8c20e2eb5f2a0175b774134685e4d7bd93e85ff8 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:18:59 -0700
Subject: [PATCH 01/19] UPSTREAM: mm: multi-gen LRU: rename lru_gen_struct to
 lru_gen_folio

Patch series "mm: multi-gen LRU: memcg LRU", v3.

Overview
========

A memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
since each node and memcg combination has an LRU of folios (see
mem_cgroup_lruvec()).

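To make the nesting concrete, here is a minimal sketch (invented
names, not the kernel's actual definitions; the real structures are
introduced later in this series):

  /* per node-memcg pair: an LRU of folios, kept in generations */
  struct folio_lru_sketch {
  	struct list_head folio_gens[MAX_NR_GENS];
  };

  /* per node: an LRU of memcgs, where each entry is itself an LRU
   * of folios -- hence "an LRU of LRUs" */
  struct memcg_lru_sketch {
  	struct list_head memcg_gens[SKETCH_NR_GENS];	/* made-up constant */
  };
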
Its goal is to improve the scalability of global reclaim, which is
critical to system-wide memory overcommit in data centers. Note that
memcg reclaim is currently out of scope.

Its memory overhead is one pointer per lruvec and is negligible per
pglist_data. In terms of traversing memcgs during global reclaim, it
improves the best-case complexity from O(n) to O(1) and does not affect
the worst-case complexity O(n). Therefore, on average, it has a sublinear
complexity in contrast to the current linear complexity.

The basic structure of a memcg LRU can be understood by an analogy to
the active/inactive LRU (of folios):
1. It has the young and the old (generations), i.e., the counterparts
   to the active and the inactive;
2. The increment of max_seq triggers promotion, i.e., the counterpart
   to activation (see the helper quoted below);
3. Other events trigger similar operations, e.g., offlining a memcg
   triggers demotion, i.e., the counterpart to deactivation.

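For reference, the folio-side code already maps a sequence number to
a generation with this helper in include/linux/mm_inline.h; the young
generation sits at max_seq and the oldest trails behind at min_seq:

  static inline int lru_gen_from_seq(unsigned long seq)
  {
  	return seq % MAX_NR_GENS;
  }
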
In terms of global reclaim, it has two distinct features:
1. Sharding, which allows each thread to start at a random memcg (in
   the old generation) and improves parallelism (see the sketch below);
2. Eventual fairness, which allows direct reclaim to bail out at will
   and reduces latency without affecting fairness over some time.

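A rough sketch of the sharding idea only (the names below are made
up; the real implementation lands with the memcg LRU itself):

  /* Each reclaimer picks a random bin of the old memcg generation,
   * so concurrent reclaimers mostly start on different memcgs. */
  static struct mem_cgroup *sketch_first_memcg(struct pglist_data *pgdat)
  {
  	int bin = get_random_u32() % SKETCH_NR_BINS;	/* made-up constant */

  	return sketch_memcg_from_bin(pgdat, bin);	/* made-up helper */
  }
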
The commit message in patch 6 details the workflow:
https://lore.kernel.org/r/20221222041905.2431096-7-yuzhao@google.com/

The following is a simple test to quickly verify its effectiveness.

Test design:
1. Create multiple memcgs.
2. Each memcg contains a job (fio).
3. All jobs access the same amount of memory randomly.
4. The system does not experience global memory pressure.
5. Periodically write to the root memory.reclaim.

Desired outcome:
1. All memcgs have similar pgsteal counts, i.e., stddev(pgsteal)
   over mean(pgsteal) is close to 0%.
2. The total pgsteal is close to the total requested through
   memory.reclaim, i.e., sum(pgsteal) over sum(requested) is close
   to 100%.

Actual outcome [1]:
                                    MGLRU off    MGLRU on
  stddev(pgsteal) / mean(pgsteal)   75%          20%
  sum(pgsteal) / sum(requested)     425%         95%

####################################################################
MEMCGS=128

for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
    mkdir /sys/fs/cgroup/memcg$memcg
done

start() {
    echo $BASHPID > /sys/fs/cgroup/memcg$memcg/cgroup.procs

    fio -name=memcg$memcg --numjobs=1 --ioengine=mmap \
        --filename=/dev/zero --size=1920M --rw=randrw \
        --rate=64m,64m --random_distribution=random \
        --fadvise_hint=0 --time_based --runtime=10h \
        --group_reporting --minimal
}

for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
    start &
done

sleep 600

for ((i = 0; i < 600; i++)); do
    echo 256m >/sys/fs/cgroup/memory.reclaim
    sleep 6
done

for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
    grep "pgsteal " /sys/fs/cgroup/memcg$memcg/memory.stat
done
####################################################################

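As a rough post-processing aid (not part of the original test), both
metrics can be computed from the output of the last loop, assuming it
was redirected to a file named pgsteal.txt and the system uses 4KiB
pages:

  # 600 writes of 256m each were requested; pgsteal counts pages.
  awk -v requested_mib=$((600 * 256)) '
  	{ n++; sum += $2; sumsq += $2 * $2 }
  	END {
  		mean = sum / n;
  		printf "stddev/mean = %.0f%%\n",
  		       100 * sqrt(sumsq / n - mean * mean) / mean;
  		printf "steal/requested = %.0f%%\n",
  		       100 * sum * 4096 / (requested_mib * 1048576);
  	}' pgsteal.txt
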
[1]: This was obtained from running the above script (touches less
     than 256GB memory) on an EPYC 7B13 with 512GB DRAM for over an
     hour.

This patch (of 8):

The new name lru_gen_folio will be more distinct from the coming
lru_gen_memcg.

Link: https://lkml.kernel.org/r/20221222041905.2431096-1-yuzhao@google.com
Link: https://lkml.kernel.org/r/20221222041905.2431096-2-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Bug: 274865848
(cherry picked from commit 391655fe08d1f942359a11148aa9aaf3f99d6d6f)
Change-Id: I7df67e0e2435ba28f10eaa57d28d98b61a9210a6
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 include/linux/mm_inline.h |  4 ++--
 include/linux/mmzone.h    |  6 +++---
 mm/vmscan.c               | 34 +++++++++++++++++-----------------
 mm/workingset.c           |  4 ++--
 4 files changed, 24 insertions(+), 24 deletions(-)

--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -178,7 +178,7 @@ static inline void lru_gen_update_size(s
 	int zone = folio_zonenum(folio);
 	int delta = folio_nr_pages(folio);
 	enum lru_list lru = type * LRU_INACTIVE_FILE;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
 	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
@@ -224,7 +224,7 @@ static inline bool lru_gen_add_folio(str
 	int gen = folio_lru_gen(folio);
 	int type = folio_is_file_lru(folio);
 	int zone = folio_zonenum(folio);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -404,7 +404,7 @@ enum {
  * The number of pages in each generation is eventually consistent and therefore
  * can be transiently negative when reset_batch_size() is pending.
  */
-struct lru_gen_struct {
+struct lru_gen_folio {
 	/* the aging increments the youngest generation number */
 	unsigned long max_seq;
 	/* the eviction increments the oldest generation numbers */
@@ -461,7 +461,7 @@ struct lru_gen_mm_state {
 struct lru_gen_mm_walk {
 	/* the lruvec under reclaim */
 	struct lruvec *lruvec;
-	/* unstable max_seq from lru_gen_struct */
+	/* unstable max_seq from lru_gen_folio */
 	unsigned long max_seq;
 	/* the next address within an mm to scan */
 	unsigned long next_addr;
@@ -524,7 +524,7 @@ struct lruvec {
 	unsigned long flags;
 #ifdef CONFIG_LRU_GEN
 	/* evictable pages divided into generations */
-	struct lru_gen_struct lrugen;
+	struct lru_gen_folio lrugen;
 	/* to concurrently iterate lru_gen_mm_list */
 	struct lru_gen_mm_state mm_state;
 #endif
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3190,7 +3190,7 @@ static int get_nr_gens(struct lruvec *lr

 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
 {
-	/* see the comment on lru_gen_struct */
+	/* see the comment on lru_gen_folio */
 	return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
 	       get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
 	       get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
@@ -3596,7 +3596,7 @@ struct ctrl_pos {
 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
 			  struct ctrl_pos *pos)
 {
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int hist = lru_hist_from_seq(lrugen->min_seq[type]);

 	pos->refaulted = lrugen->avg_refaulted[type][tier] +
@@ -3611,7 +3611,7 @@ static void read_ctrl_pos(struct lruvec
 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
 {
 	int hist, tier;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
 	unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;

@@ -3688,7 +3688,7 @@ static int folio_update_gen(struct folio
 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
 {
 	int type = folio_is_file_lru(folio);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
 	unsigned long new_flags, old_flags = READ_ONCE(folio->flags);

@@ -3733,7 +3733,7 @@ static void update_batch_size(struct lru
 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
 {
 	int gen, type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	walk->batched = 0;
@@ -4250,7 +4250,7 @@ static bool inc_min_seq(struct lruvec *l
 {
 	int zone;
 	int remaining = MAX_LRU_BATCH;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);

 	if (type == LRU_GEN_ANON && !can_swap)
@@ -4286,7 +4286,7 @@ static bool try_to_inc_min_seq(struct lr
 {
 	int gen, type, zone;
 	bool success = false;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	DEFINE_MIN_SEQ(lruvec);

 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
@@ -4307,7 +4307,7 @@ next:
 		;
 	}

-	/* see the comment on lru_gen_struct */
+	/* see the comment on lru_gen_folio */
 	if (can_swap) {
 		min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
 		min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
@@ -4329,7 +4329,7 @@ static void inc_max_seq(struct lruvec *l
 {
 	int prev, next;
 	int type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 restart:
 	spin_lock_irq(&lruvec->lru_lock);
@@ -4389,7 +4389,7 @@ static bool try_to_inc_max_seq(struct lr
 	bool success;
 	struct lru_gen_mm_walk *walk;
 	struct mm_struct *mm = NULL;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
@@ -4454,7 +4454,7 @@ static bool should_run_aging(struct lruv
 	unsigned long old = 0;
 	unsigned long young = 0;
 	unsigned long total = 0;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);

 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
@@ -4740,7 +4740,7 @@ static bool sort_folio(struct lruvec *lr
 	int delta = folio_nr_pages(folio);
 	int refs = folio_lru_refs(folio);
 	int tier = lru_tier_from_refs(refs);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
@@ -4848,7 +4848,7 @@ static int scan_folios(struct lruvec *lr
 	int scanned = 0;
 	int isolated = 0;
 	int remaining = MAX_LRU_BATCH;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);

 	VM_WARN_ON_ONCE(!list_empty(list));
@@ -5249,7 +5249,7 @@ done:

 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
 {
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	if (lrugen->enabled) {
 		enum lru_list lru;
@@ -5531,7 +5531,7 @@ static void lru_gen_seq_show_full(struct
 	int i;
 	int type, tier;
 	int hist = lru_hist_from_seq(seq);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
 		seq_printf(m, " %10d", tier);
@@ -5581,7 +5581,7 @@ static int lru_gen_seq_show(struct seq_f
 	unsigned long seq;
 	bool full = !debugfs_real_fops(m->file)->write;
 	struct lruvec *lruvec = v;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	int nid = lruvec_pgdat(lruvec)->node_id;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	DEFINE_MAX_SEQ(lruvec);
@@ -5835,7 +5835,7 @@ void lru_gen_init_lruvec(struct lruvec *
 {
 	int i;
 	int gen, type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;

 	lrugen->max_seq = MIN_NR_GENS + 1;
 	lrugen->enabled = lru_gen_enabled();
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -223,7 +223,7 @@ static void *lru_gen_eviction(struct fol
 	unsigned long token;
 	unsigned long min_seq;
 	struct lruvec *lruvec;
-	struct lru_gen_struct *lrugen;
+	struct lru_gen_folio *lrugen;
 	int type = folio_is_file_lru(folio);
 	int delta = folio_nr_pages(folio);
 	int refs = folio_lru_refs(folio);
@@ -252,7 +252,7 @@ static void lru_gen_refault(struct folio
 	unsigned long token;
 	unsigned long min_seq;
 	struct lruvec *lruvec;
-	struct lru_gen_struct *lrugen;
+	struct lru_gen_folio *lrugen;
 	struct mem_cgroup *memcg;
 	struct pglist_data *pgdat;
 	int type = folio_is_file_lru(folio);