commit 12f12df569
Changelog: https://cdn.kernel.org/pub/linux/kernel/v6.x/ChangeLog-6.6.55

Added the following default ksyms to target/linux/generic/config-6.6:

CONFIG_PROC_MEM_ALWAYS_FORCE=y
# CONFIG_PROC_MEM_FORCE_PTRACE is not set
# CONFIG_PROC_MEM_NO_FORCE is not set

Removed upstreamed:
  generic/backport-6.6/780-23-v6.12-r8169-Fix-spelling-mistake-tx_underun-tx_underrun.patch[1]
  generic/backport-6.6/780-25-v6.12-r8169-add-tally-counter-fields-added-with-RTL8125.patch[2]
  generic/pending-6.6/684-gso-fix-gso-fraglist-segmentation-after-pull-from-fr.patch[3]
  lantiq/patches-6.6/0025-v6.12-net-ethernet-lantiq_etop-fix-memory-disclosure.patch[4]

Manually rebased:
  bcm27xx/patches-6.6/950-0086-Main-bcm2708-bcm2709-linux-port.patch
  bcm27xx/patches-6.6/950-0998-i2c-designware-Add-support-for-bus-clear-feature.patch

All other patches automatically rebased.

1. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.6.56&id=f02fcb7283b1c25f7e3ae07d7a2c830e06eb1a62
2. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.6.56&id=1c723d785adb711496bc64c24240f952f4faaabf
3. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.6.56&id=af3122f5fdc0d00581d6e598a668df6bf54c9daa
4. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.6.56&id=e66e38d07b31e177ca430758ed97fbc79f27d966

Build system: x86/64
Build-tested: x86/64/AMD Cezanne, flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3
Run-tested: x86/64/AMD Cezanne, flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3

Signed-off-by: John Audia <therealgraysky@proton.me>
Link: https://github.com/openwrt/openwrt/pull/16655
Signed-off-by: Nick Hainke <vincent@systemli.org>
From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Mar 2024 08:40:31 +0100
Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.

The rps_lock.*() functions use the inner lock of a sk_buff_head for
locking. This lock is used if RPS is enabled, otherwise the list is
accessed locklessly and disabling interrupts is enough for the
synchronisation because it is only accessed CPU-locally. Not only is
the list protected but also the NAPI state.

With the addition of backlog threads, the lock is also needed because
of the cross-CPU access even without RPS. The clean-up of the
defer_list is also done via backlog threads (if enabled).

It has been suggested to rename the locking functions since they are
no longer used only for RPS.

Rename the rps_lock*() functions to backlog_lock*().
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/core/dev.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

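Note (editorial, not part of the upstream change): the renamed helpers
keep the exact conditional-locking pattern the message above describes.
When another CPU may touch the backlog (RPS or backlog threads), the
queue's inner spinlock must be taken; otherwise masking interrupts is
enough, since the queue is only touched CPU-locally. A minimal sketch of
the _irq_save variant, with the CONFIG_PREEMPT_RT special case omitted
for brevity:

    static inline void backlog_lock_irq_save(struct softnet_data *sd,
                                             unsigned long *flags)
    {
            /* Cross-CPU access is possible (RPS or backlog threads):
             * the queue's inner spinlock must be taken.
             */
            if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                    spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
            else
                    /* The queue is only touched CPU-locally here:
                     * masking interrupts is sufficient.
                     */
                    local_irq_save(*flags);
    }
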
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
 
 #endif
 
-static inline void rps_lock_irqsave(struct softnet_data *sd,
-                                    unsigned long *flags)
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+                                         unsigned long *flags)
 {
         if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
                 local_irq_save(*flags);
 }
 
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 {
         if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                 spin_lock_irq(&sd->input_pkt_queue.lock);
@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
                 local_irq_disable();
 }
 
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-                                          unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+                                              unsigned long *flags)
 {
         if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
                 local_irq_restore(*flags);
 }
 
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
 {
         if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                 spin_unlock_irq(&sd->input_pkt_queue.lock);
@@ -4783,12 +4783,12 @@ void kick_defer_list_purge(struct softne
         unsigned long flags;
 
         if (use_backlog_threads()) {
-                rps_lock_irqsave(sd, &flags);
+                backlog_lock_irq_save(sd, &flags);
 
                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
                         __napi_schedule_irqoff(&sd->backlog);
 
-                rps_unlock_irq_restore(sd, &flags);
+                backlog_unlock_irq_restore(sd, &flags);
 
         } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
                 smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -4850,7 +4850,7 @@ static int enqueue_to_backlog(struct sk_
         reason = SKB_DROP_REASON_NOT_SPECIFIED;
         sd = &per_cpu(softnet_data, cpu);
 
-        rps_lock_irqsave(sd, &flags);
+        backlog_lock_irq_save(sd, &flags);
         if (!netif_running(skb->dev))
                 goto drop;
         qlen = skb_queue_len(&sd->input_pkt_queue);
@@ -4859,7 +4859,7 @@ static int enqueue_to_backlog(struct sk_
 enqueue:
                         __skb_queue_tail(&sd->input_pkt_queue, skb);
                         input_queue_tail_incr_save(sd, qtail);
-                        rps_unlock_irq_restore(sd, &flags);
+                        backlog_unlock_irq_restore(sd, &flags);
                         return NET_RX_SUCCESS;
                 }
 
@@ -4874,7 +4874,7 @@ enqueue:
 
 drop:
         sd->dropped++;
-        rps_unlock_irq_restore(sd, &flags);
+        backlog_unlock_irq_restore(sd, &flags);
 
         dev_core_stats_rx_dropped_inc(skb->dev);
         kfree_skb_reason(skb, reason);
@@ -5905,7 +5905,7 @@ static void flush_backlog(struct work_st
         local_bh_disable();
         sd = this_cpu_ptr(&softnet_data);
 
-        rps_lock_irq_disable(sd);
+        backlog_lock_irq_disable(sd);
         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
                         __skb_unlink(skb, &sd->input_pkt_queue);
@@ -5913,7 +5913,7 @@ static void flush_backlog(struct work_st
                         input_queue_head_incr(sd);
                 }
         }
-        rps_unlock_irq_enable(sd);
+        backlog_unlock_irq_enable(sd);
 
         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5931,14 +5931,14 @@ static bool flush_required(int cpu)
         struct softnet_data *sd = &per_cpu(softnet_data, cpu);
         bool do_flush;
 
-        rps_lock_irq_disable(sd);
+        backlog_lock_irq_disable(sd);
 
         /* as insertion into process_queue happens with the rps lock held,
          * process_queue access may race only with dequeue
          */
         do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
                    !skb_queue_empty_lockless(&sd->process_queue);
-        rps_unlock_irq_enable(sd);
+        backlog_unlock_irq_enable(sd);
 
         return do_flush;
 #endif
@@ -6053,7 +6053,7 @@ static int process_backlog(struct napi_s
 
                 }
 
-                rps_lock_irq_disable(sd);
+                backlog_lock_irq_disable(sd);
                 if (skb_queue_empty(&sd->input_pkt_queue)) {
                         /*
                          * Inline a custom version of __napi_complete().
@@ -6069,7 +6069,7 @@ static int process_backlog(struct napi_s
                         skb_queue_splice_tail_init(&sd->input_pkt_queue,
                                                    &sd->process_queue);
                 }
-                rps_unlock_irq_enable(sd);
+                backlog_unlock_irq_enable(sd);
         }
 
         return work;
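
Note (editorial): the rename is mechanical at every call site; access to
the softnet_data queues stays bracketed by the same lock/unlock pair. A
minimal, hypothetical usage sketch, assuming a cpu variable and an skb
as in the enqueue_to_backlog() hunks above:

    unsigned long flags;
    struct softnet_data *sd = &per_cpu(softnet_data, cpu);

    /* Take the backlog lock: a spinlock or plain IRQ-off, depending
     * on whether RPS or backlog threads are in use.
     */
    backlog_lock_irq_save(sd, &flags);
    /* The input queue may now be modified safely. */
    __skb_queue_tail(&sd->input_pkt_queue, skb);
    /* Release in the reverse order. */
    backlog_unlock_irq_restore(sd, &flags);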