92eb867873
Changelog: https://cdn.kernel.org/pub/linux/kernel/v6.x/ChangeLog-6.6.60
Removed upstreamed:
generic/backport-6.6/409-mtd-spi-nor-winbond-fix-w25q128-regression.patch
All other patches automatically rebased.
1. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.6.60&id=27a58a19bd20a7afe369da2ce6d4ebea70768acd
Build system: x86/64
Build-tested: x86/64/AMD Cezanne, flogic/glinet_gl-mt6000, flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3
Run-tested: x86/64/AMD Cezanne, flogic/glinet_gl-mt6000, flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3
Signed-off-by: John Audia <therealgraysky@proton.me>
Link: https://github.com/openwrt/openwrt/pull/16892
Signed-off-by: Nick Hainke <vincent@systemli.org>
(cherry picked from commit 85844cfc5c)
From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Mar 2024 08:40:31 +0100
Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.

The rps_lock.*() functions use the inner lock of a sk_buff_head for
locking. This lock is used if RPS is enabled, otherwise the list is
accessed locklessly and disabling interrupts is enough for the
synchronisation because it is only accessed CPU-locally. Not only is the
list protected but also the NAPI state.

With the addition of backlog threads, the lock is also needed because of
the cross-CPU access even without RPS. The cleanup of the defer_list is
also done via backlog threads (if enabled).

It has been suggested to rename the locking functions since they are no
longer just about RPS.

Rename the rps_lock*() functions to backlog_lock*().

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/core/dev.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
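For readers skimming the hunks below, the conditional-locking pattern being renamed reduces to the following condensed sketch of the two save/restore helpers (kernel-style C, abridged from the diff that follows; not a standalone compilable unit):

/* With RPS or threaded backlog NAPI the per-CPU input_pkt_queue can be
 * touched from another CPU, so its inner spinlock must be taken; without
 * either, the queue is only accessed locally and disabling interrupts on
 * this CPU is sufficient.
 */
static inline void backlog_lock_irq_save(struct softnet_data *sd,
					 unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_save(*flags);
}

static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
					      unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else
		local_irq_restore(*flags);
}

Callers such as enqueue_to_backlog() and kick_defer_list_purge() pair these helpers around every access to input_pkt_queue, as the hunks below show.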
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
 
 #endif
 
-static inline void rps_lock_irqsave(struct softnet_data *sd,
-				    unsigned long *flags)
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+					 unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
 		local_irq_save(*flags);
 }
 
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irq(&sd->input_pkt_queue.lock);
@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
 		local_irq_disable();
 }
 
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-					  unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+					      unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
 		local_irq_restore(*flags);
 }
 
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irq(&sd->input_pkt_queue.lock);
@@ -4787,12 +4787,12 @@ void kick_defer_list_purge(struct softne
 	unsigned long flags;
 
 	if (use_backlog_threads()) {
-		rps_lock_irqsave(sd, &flags);
+		backlog_lock_irq_save(sd, &flags);
 
 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
 			__napi_schedule_irqoff(&sd->backlog);
 
-		rps_unlock_irq_restore(sd, &flags);
+		backlog_unlock_irq_restore(sd, &flags);
 
 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
 		smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -4854,7 +4854,7 @@ static int enqueue_to_backlog(struct sk_
 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	sd = &per_cpu(softnet_data, cpu);
 
-	rps_lock_irqsave(sd, &flags);
+	backlog_lock_irq_save(sd, &flags);
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
@@ -4863,7 +4863,7 @@ static int enqueue_to_backlog(struct sk_
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);
 			input_queue_tail_incr_save(sd, qtail);
-			rps_unlock_irq_restore(sd, &flags);
+			backlog_unlock_irq_restore(sd, &flags);
 			return NET_RX_SUCCESS;
 		}
 
@@ -4878,7 +4878,7 @@ enqueue:
 
 drop:
 	sd->dropped++;
-	rps_unlock_irq_restore(sd, &flags);
+	backlog_unlock_irq_restore(sd, &flags);
 
 	dev_core_stats_rx_dropped_inc(skb->dev);
 	kfree_skb_reason(skb, reason);
@@ -5909,7 +5909,7 @@ static void flush_backlog(struct work_st
 	local_bh_disable();
 	sd = this_cpu_ptr(&softnet_data);
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -5917,7 +5917,7 @@ static void flush_backlog(struct work_st
 			input_queue_head_incr(sd);
 		}
 	}
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5935,14 +5935,14 @@ static bool flush_required(int cpu)
 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
 	bool do_flush;
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 
 	/* as insertion into process_queue happens with the rps lock held,
 	 * process_queue access may race only with dequeue
 	 */
 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
 		   !skb_queue_empty_lockless(&sd->process_queue);
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	return do_flush;
 #endif
@@ -6057,7 +6057,7 @@ static int process_backlog(struct napi_s
 
 		}
 
-		rps_lock_irq_disable(sd);
+		backlog_lock_irq_disable(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
@@ -6073,7 +6073,7 @@ static int process_backlog(struct napi_s
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
 						   &sd->process_queue);
 		}
-		rps_unlock_irq_enable(sd);
+		backlog_unlock_irq_enable(sd);
 	}
 
 	return work;