ramips: Rework ramips_eth to not require irqsave locking anymore

Previously the tx housekeeping was done in a spin_lock_irqsave critical
section, which disables interrupts for the duration of the housekeeping.
Since the housekeeping is already prepared to run as a tasklet, process
it exclusively in softirq context and rework the locking between the tx
path and the housekeeping tasklet to use a plain spin_lock. Both the tx
path and the tasklet run in softirq context and thus cannot preempt each
other on the same CPU, so a plain spin_lock is sufficient on SMP and in
most situations (uniprocessor builds) will be a NOP anyway.

This makes sure that interrupts are only disabled for a short time,
since in the worst case the housekeeping has to free up to 256 skbs
(presumably one per tx descriptor, NUM_TX_DESC in the hunks below).
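
The hunks below schedule priv->tx_housekeeping_tasklet, but the
declaration and setup of that tasklet are not part of the excerpt. A
minimal sketch of what the driver presumably does (the field name and
the housekeeping function come from the diff; the struct member and
placing the init in the open path are assumptions):

    #include <linux/interrupt.h>

    struct raeth_priv {
    	/* ... existing fields ... */
    	struct tasklet_struct tx_housekeeping_tasklet; /* assumed new member */
    };

    /* e.g. during device open (exact location is an assumption): */
    tasklet_init(&priv->tx_housekeeping_tasklet,
    	     ramips_eth_tx_housekeeping, (unsigned long)dev);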

Signed-off-by: Helmut Schaa <helmut.schaa@googlemail.com>

SVN-Revision: 29762
Commit a2708d11e8 (parent 2683b5b15d)
John Crispin, 2012-01-17 11:23:11 +00:00

@@ -215,7 +215,6 @@ ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long tx;
 	unsigned int tx_next;
 	unsigned int mapped_addr;
-	unsigned long flags;
 
 	if (priv->plat->min_pkt_len) {
 		if (skb->len < priv->plat->min_pkt_len) {
@@ -233,7 +232,7 @@ ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
 						    DMA_TO_DEVICE);
 	dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
-	spin_lock_irqsave(&priv->page_lock, flags);
+	spin_lock(&priv->page_lock);
 	tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
 	tx_next = (tx + 1) % NUM_TX_DESC;
 
@@ -250,11 +249,11 @@ ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->tx_skb[tx] = skb;
 	wmb();
 	ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
-	spin_unlock_irqrestore(&priv->page_lock, flags);
+	spin_unlock(&priv->page_lock);
 	return NETDEV_TX_OK;
 
  out:
-	spin_unlock_irqrestore(&priv->page_lock, flags);
+	spin_unlock(&priv->page_lock);
 	dev->stats.tx_dropped++;
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -313,6 +312,7 @@ ramips_eth_tx_housekeeping(unsigned long ptr)
 	struct net_device *dev = (struct net_device*)ptr;
 	struct raeth_priv *priv = netdev_priv(dev);
 
+	spin_lock(&priv->page_lock);
 	while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
 	       (priv->tx_skb[priv->skb_free_idx])) {
 		dev_kfree_skb_irq(priv->tx_skb[priv->skb_free_idx]);
@@ -321,6 +321,7 @@ ramips_eth_tx_housekeeping(unsigned long ptr)
 		if (priv->skb_free_idx >= NUM_TX_DESC)
 			priv->skb_free_idx = 0;
 	}
+	spin_unlock(&priv->page_lock);
 
 	ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
 }
@@ -346,8 +347,10 @@ ramips_eth_irq(int irq, void *dev)
 		tasklet_schedule(&priv->rx_tasklet);
 	}
 
-	if (fe_int & RAMIPS_TX_DLY_INT)
-		ramips_eth_tx_housekeeping((unsigned long)dev);
+	if (fe_int & RAMIPS_TX_DLY_INT) {
+		ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
+		tasklet_schedule(&priv->tx_housekeeping_tasklet);
+	}
 
 	return IRQ_HANDLED;
 }
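
Condensed view of the resulting interrupt handshake, assembled from the
hunks above (surrounding code elided): the handler masks the delayed-tx
interrupt before scheduling the tasklet, so the source cannot re-fire
while housekeeping is still pending, and the tasklet unmasks it only
after all completed skbs have been freed. dev_kfree_skb_irq() remains
safe here, as it may be called from softirq context as well.

    /* hard-irq context: mask the source, defer the work */
    static irqreturn_t
    ramips_eth_irq(int irq, void *dev)
    {
    	/* ... */
    	if (fe_int & RAMIPS_TX_DLY_INT) {
    		ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
    		tasklet_schedule(&priv->tx_housekeeping_tasklet);
    	}
    	return IRQ_HANDLED;
    }

    /* softirq context: free TX_DMA_DONE skbs under priv->page_lock,
     * then unmask so the next delayed-tx interrupt can be delivered */
    static void
    ramips_eth_tx_housekeeping(unsigned long ptr)
    {
    	/* ... housekeeping loop from the hunks above ... */
    	ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
    }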