From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 13 Mar 2019 19:09:22 +0100
Subject: [PATCH] mac80211: rework locking for txq scheduling / airtime
 fairness

Holding the lock around the entire duration of tx scheduling can create
some nasty lock contention, especially when processing airtime information
from the tx status or the rx path.
Improve locking by only holding the active_txq_lock for lookups / scheduling
list modifications.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
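A note on driver usage: the scheduling entry points keep their names and
call order, only the lock lifetime inside mac80211 changes. A minimal
sketch of a driver's per-AC scheduling loop against the reworked API is
shown below; drv_schedule_ac() and drv_push_frame() are hypothetical
placeholders, not part of this patch:

/* Hypothetical driver-side scheduling loop (sketch only).
 * ieee80211_txq_schedule_start() now only takes active_txq_lock briefly
 * to bump the scheduling round; ieee80211_next_txq() and
 * ieee80211_return_txq() lock it internally just for the list lookup /
 * re-insertion, so no lock is held while frames are pushed to hardware.
 */
static void drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;
	struct sk_buff *skb;

	ieee80211_txq_schedule_start(hw, ac);

	while ((txq = ieee80211_next_txq(hw, ac))) {
		/* dequeue frames while this txq has something to send */
		while ((skb = ieee80211_tx_dequeue(hw, txq)))
			drv_push_frame(hw, txq, skb);

		/* re-add the txq to the scheduling list if it still has
		 * traffic queued
		 */
		ieee80211_return_txq(hw, txq);
	}

	ieee80211_txq_schedule_end(hw, ac);
}

ieee80211_txq_schedule_end() becomes an empty inline kept only so existing
callers keep compiling; it is marked deprecated in the header hunk below.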
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -6069,8 +6069,6 @@ struct sk_buff *ieee80211_tx_dequeue(str
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to return packets from.
  *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
  * is returned, it should be returned with ieee80211_return_txq() after the
  * driver has finished scheduling it.
@@ -6078,51 +6076,41 @@ struct sk_buff *ieee80211_tx_dequeue(str
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
 
 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to acquire locks for
  *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
  */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
 
 /**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
  *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * Schedules a TXQ for transmission if it is not already scheduled.
  */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock);
+void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
 
 /**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @txq: pointer obtained from station or virtual interface
- *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
  */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-	__acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	ieee80211_schedule_txq(hw, txq);
+}
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3619,16 +3619,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_txq *ret = NULL;
 	struct txq_info *txqi = NULL;
 
-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);
 
 begin:
 	txqi = list_first_entry_or_null(&local->active_txqs[ac],
 					struct txq_info,
 					schedule_order);
 	if (!txqi)
-		return NULL;
+		goto out;
 
 	if (txqi->txq.sta) {
 		struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3645,21 +3646,25 @@ struct ieee80211_txq *ieee80211_next_txq
 
 
 	if (txqi->schedule_round == local->schedule_round[ac])
-		return NULL;
+		goto out;
 
 	list_del_init(&txqi->schedule_order);
 	txqi->schedule_round = local->schedule_round[ac];
-	return &txqi->txq;
+	ret = &txqi->txq;
+
+out:
+	spin_unlock_bh(&local->active_txq_lock[ac]);
+	return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-void ieee80211_return_txq(struct ieee80211_hw *hw,
-			  struct ieee80211_txq *txq)
+void ieee80211_schedule_txq(struct ieee80211_hw *hw,
+			    struct ieee80211_txq *txq)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct txq_info *txqi = to_txq_info(txq);
 
-	lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+	spin_lock_bh(&local->active_txq_lock[txq->ac]);
 
 	if (list_empty(&txqi->schedule_order) &&
 	    (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
@@ -3679,18 +3684,7 @@ void ieee80211_return_txq(struct ieee802
 		list_add_tail(&txqi->schedule_order,
 			      &local->active_txqs[txq->ac]);
 	}
-}
-EXPORT_SYMBOL(ieee80211_return_txq);
 
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
-			    struct ieee80211_txq *txq)
-	__acquires(txq_lock) __releases(txq_lock)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-	struct txq_info *txqi = to_txq_info(txq);
-
-	spin_lock_bh(&local->active_txq_lock[txq->ac]);
-	ieee80211_return_txq(hw, txq);
 	spin_unlock_bh(&local->active_txq_lock[txq->ac]);
 }
 EXPORT_SYMBOL(ieee80211_schedule_txq);
@@ -3703,7 +3697,7 @@ bool ieee80211_txq_may_transmit(struct i
 	struct sta_info *sta;
 	u8 ac = txq->ac;
 
-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);
 
 	if (!txqi->txq.sta)
 		goto out;
@@ -3733,34 +3727,27 @@ bool ieee80211_txq_may_transmit(struct i
 
 	sta->airtime[ac].deficit += sta->airtime_weight;
 	list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+	spin_unlock_bh(&local->active_txq_lock[ac]);
 
 	return false;
 out:
 	if (!list_empty(&txqi->schedule_order))
 		list_del_init(&txqi->schedule_order);
+	spin_unlock_bh(&local->active_txq_lock[ac]);
 
 	return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
 	spin_lock_bh(&local->active_txq_lock[ac]);
 	local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-
 	spin_unlock_bh(&local->active_txq_lock[ac]);
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev,
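For the pull/fetch path, the last hunk above makes
ieee80211_txq_may_transmit() take and release active_txq_lock on its own
instead of asserting that the caller already holds it. A rough,
hypothetical sketch of a fetch handler built on that behaviour follows;
drv_handle_fetch() and drv_push_frame() are made-up placeholders, not part
of this patch:

/* Hypothetical fetch-indication handler (sketch only). With this patch,
 * ieee80211_txq_may_transmit() handles active_txq_lock internally, so the
 * driver does not hold any mac80211 scheduling lock while pushing frames.
 */
static void drv_handle_fetch(struct ieee80211_hw *hw,
			     struct ieee80211_txq *txq, int max_frames)
{
	struct sk_buff *skb;
	int sent = 0;

	ieee80211_txq_schedule_start(hw, txq->ac);

	if (ieee80211_txq_may_transmit(hw, txq)) {
		while (sent < max_frames &&
		       (skb = ieee80211_tx_dequeue(hw, txq))) {
			drv_push_frame(hw, txq, skb);
			sent++;
		}
		/* put the txq back on the scheduling list if needed */
		ieee80211_return_txq(hw, txq);
	}

	ieee80211_txq_schedule_end(hw, txq->ac);
}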