mirror of
https://github.com/openwrt/openwrt.git
synced 2024-12-20 14:13:16 +00:00
b357564463
Changelog: https://cdn.kernel.org/pub/linux/kernel/v6.x/ChangeLog-6.1.57 Manually rebased: generic/pending-6.1/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch Removed upstreamed: qualcommax/patches-6.1/0134-PCI-qcom-Fixing-broken-pcie-enumeration-for-2_3_3-co.patch[1] All other patches automatically rebased. 1. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.1.57&id=2dfb5f324d799f4545e17631415aba6d302a8e2b Build system: x86/64 Build-tested: x86/64/AMD Cezanne Run-tested: x86/64/AMD Cezanne Reviewed-by: Robert Marko <robimarko@gmail.com> Signed-off-by: John Audia <therealgraysky@proton.me>
68 lines
2.5 KiB
Diff
68 lines
2.5 KiB
Diff
From ad2b3cbdb8303c5d4eb6c7c6d6428443dbb8d547 Mon Sep 17 00:00:00 2001
|
|
From: David Plowman <david.plowman@raspberrypi.com>
|
|
Date: Tue, 29 Mar 2022 16:10:06 +0100
|
|
Subject: [PATCH] mm,page_alloc,cma: introduce a customisable threshold
|
|
for allocating pages in cma
|
|
|
|
On some platforms the cma area can be half the entire system memory,
|
|
meaning that allocations start happening in the cma area immediately.
|
|
This leads to fragmentation and subsequent fatal cma_alloc failures.
|
|
|
|
We introduce an "alloc_in_cma_threshold" parameter which requires that
|
|
this many sixteenths of the free pages must be in cma before it will
|
|
try to use them. By default this is set to 12, but the previous
|
|
behaviour can be restored by setting it to 8 on startup.
|
|
|
|
Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
|
|
---
|
|
mm/page_alloc.c | 28 +++++++++++++++++++++++++---
|
|
1 file changed, 25 insertions(+), 3 deletions(-)
|
|
|
|
--- a/mm/page_alloc.c
|
|
+++ b/mm/page_alloc.c
|
|
@@ -253,6 +253,27 @@ EXPORT_SYMBOL(init_on_alloc);
|
|
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
|
|
EXPORT_SYMBOL(init_on_free);
|
|
|
|
+#define ALLOC_IN_CMA_THRESHOLD_MAX 16
|
|
+#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12
|
|
+
|
|
+static unsigned long _alloc_in_cma_threshold __read_mostly
|
|
+ = ALLOC_IN_CMA_THRESHOLD_DEFAULT;
|
|
+
|
|
+static int __init alloc_in_cma_threshold_setup(char *buf)
|
|
+{
|
|
+ unsigned long res;
|
|
+
|
|
+ if (kstrtoul(buf, 10, &res) < 0 ||
|
|
+ res > ALLOC_IN_CMA_THRESHOLD_MAX) {
|
|
+ pr_err("Bad alloc_cma_threshold value\n");
|
|
+ return 0;
|
|
+ }
|
|
+ _alloc_in_cma_threshold = res;
|
|
+ pr_info("Setting alloc_in_cma_threshold to %lu\n", res);
|
|
+ return 0;
|
|
+}
|
|
+early_param("alloc_in_cma_threshold", alloc_in_cma_threshold_setup);
|
|
+
|
|
static bool _init_on_alloc_enabled_early __read_mostly
|
|
= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
|
|
static int __init early_init_on_alloc(char *buf)
|
|
@@ -3073,12 +3094,13 @@ __rmqueue(struct zone *zone, unsigned in
|
|
if (IS_ENABLED(CONFIG_CMA)) {
|
|
/*
|
|
* Balance movable allocations between regular and CMA areas by
|
|
- * allocating from CMA when over half of the zone's free memory
|
|
- * is in the CMA area.
|
|
+ * allocating from CMA when more than a given proportion of
|
|
+ * the zone's free memory is in the CMA area.
|
|
*/
|
|
if (alloc_flags & ALLOC_CMA &&
|
|
zone_page_state(zone, NR_FREE_CMA_PAGES) >
|
|
- zone_page_state(zone, NR_FREE_PAGES) / 2) {
|
|
+ zone_page_state(zone, NR_FREE_PAGES) / ALLOC_IN_CMA_THRESHOLD_MAX
|
|
+ * _alloc_in_cma_threshold) {
|
|
page = __rmqueue_cma_fallback(zone, order);
|
|
if (page)
|
|
return page;
|