106 lines
3.1 KiB
Diff
From: Tom Herbert <therbert@google.com>
Date: Mon, 28 Nov 2011 16:33:23 +0000
Subject: forcedeth: Support for byte queue limits

commit b8bfca9439d4ed03446bc9a3fdaef81b364d32dd upstream.

Changes to forcedeth to use byte queue limits.

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/nvidia/forcedeth.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

|
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1849,6 +1849,7 @@
 	np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
 	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
 	np->tx_change_owner = NULL;
 	np->tx_end_flip = NULL;
@@ -2194,6 +2195,9 @@
 
 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+	netdev_sent_queue(np->dev, skb->len);
+
 	np->put_tx.orig = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2338,6 +2342,9 @@
 
 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+	netdev_sent_queue(np->dev, skb->len);
+
 	np->put_tx.ex = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2375,6 +2382,7 @@
 	u32 flags;
 	int tx_work = 0;
 	struct ring_desc *orig_get_tx = np->get_tx.orig;
+	unsigned int bytes_compl = 0;
 
 	while ((np->get_tx.orig != np->put_tx.orig) &&
 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2391,6 +2399,7 @@
 				dev->stats.tx_packets++;
 				dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 			}
+			bytes_compl += np->get_tx_ctx->skb->len;
 			dev_kfree_skb_any(np->get_tx_ctx->skb);
 			np->get_tx_ctx->skb = NULL;
 			tx_work++;
@@ -2404,6 +2413,7 @@
 				dev->stats.tx_packets++;
 				dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 			}
+			bytes_compl += np->get_tx_ctx->skb->len;
 			dev_kfree_skb_any(np->get_tx_ctx->skb);
 			np->get_tx_ctx->skb = NULL;
 			tx_work++;
@@ -2414,6 +2424,9 @@
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
+
+	netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
@@ -2427,6 +2440,7 @@
 	u32 flags;
 	int tx_work = 0;
 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+	unsigned long bytes_cleaned = 0;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2447,6 +2461,7 @@
 			dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 		}
 
+		bytes_cleaned += np->get_tx_ctx->skb->len;
 		dev_kfree_skb_any(np->get_tx_ctx->skb);
 		np->get_tx_ctx->skb = NULL;
 		tx_work++;
@@ -2454,6 +2469,9 @@
 		if (np->tx_limit)
 			nv_tx_flip_ownership(dev);
 	}
+
+	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
 	if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 		np->get_tx.ex = np->first_tx.ex;
 	if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))