author		David Daney <ddaney@caviumnetworks.com>	2009-06-23 16:20:56 -0700
committer	Ralf Baechle <ralf@linux-mips.org>	2009-06-24 18:34:41 +0100
commit		a620c1632629b42369e78448acc7b384fe1faf48 (patch)
tree		3318683c03abb4ca45307c7df0019f74bcba3b13 /drivers/staging/octeon/ethernet-tx.c
parent		f696a10838ffab85e5bc07e7cff0d0e1870a30d7 (diff)
Staging: octeon-ethernet: Fix race freeing transmit buffers.
The existing code had the following race:

	Thread-1			Thread-2

	inc/read in_use
					inc/read in_use
	inc tx_free_list[qos].len
					inc tx_free_list[qos].len

The actual in_use value was incremented twice, but Thread-1 is going
to free memory based on its stale value, and will free one time too
many. The result is that memory is freed back to the kernel while its
packet is still in the transmit buffer. If the memory is overwritten
before it is transmitted, the hardware will put a valid checksum on it
and send it out (just as it does with good packets). If by chance the
TCP flags are clobbered but not the addresses or ports, the result can
be a broken TCP stream.

The fix is to track the number of freed packets in a single location
(a Fetch-and-Add Unit register). That way it can never get out of
sync with itself. We try to free up to MAX_SKB_TO_FREE (currently 10)
buffers at a time. If fewer are available, we adjust the free count
by the difference. The action of claiming buffers to free is atomic,
so two threads cannot claim the same buffers.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
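[Editorial note: the claim-and-undo arithmetic in the patch is terse, so
below is a minimal user-space sketch of the same idea. It assumes a C11
atomic counter standing in for the FAU register, with each completed
hardware transmit modeled as a decrement of one; the helper name
claim_skbs_to_free() is hypothetical, but the arithmetic mirrors the
hunk in the diff below.]

	#include <stdatomic.h>

	#define MAX_SKB_TO_FREE 10	/* same limit the patch uses */

	/*
	 * Stand-in for the FAU register at priv->fau + qos * 4. The
	 * hardware side is modeled as atomic_fetch_sub(&fau, 1) on each
	 * completed transmit, so a value of -N means N buffers are
	 * waiting to be freed.
	 */
	static atomic_int fau;

	/*
	 * Hypothetical helper mirroring the patched cvm_oct_xmit()
	 * arithmetic: atomically claim up to MAX_SKB_TO_FREE completed
	 * buffers.
	 */
	static int claim_skbs_to_free(void)
	{
		/* Value before the add, as cvmx_fau_fetch_and_add32()
		 * returns it. */
		int skb_to_free = atomic_fetch_add(&fau, MAX_SKB_TO_FREE);
		/* If fewer than MAX_SKB_TO_FREE were available, give
		 * back the excess. */
		int undo = skb_to_free > 0 ?
			MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
		if (undo > 0)
			atomic_fetch_sub(&fau, undo);
		/* Clamp the claim to what was actually available; a
		 * non-positive result means there was nothing to free. */
		return -skb_to_free > MAX_SKB_TO_FREE ?
			MAX_SKB_TO_FREE : -skb_to_free;
	}

For example, if fau is -3, a caller's fetch-and-add raises it to 7, the
undo of 7 brings it back to 0, and the caller walks away with a claim of
exactly 3; a second caller arriving between the add and the undo sees a
positive value and claims nothing.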
Diffstat (limited to 'drivers/staging/octeon/ethernet-tx.c')
-rw-r--r--	drivers/staging/octeon/ethernet-tx.c	56
1 file changed, 33 insertions(+), 23 deletions(-)
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index bfd3dd2..81a8513 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -47,6 +47,7 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-tx.h"
#include "ethernet-util.h"
#include "cvmx-wqe.h"
@@ -82,8 +83,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
uint64_t old_scratch2;
int dropped;
int qos;
+ int queue_it_up;
struct octeon_ethernet *priv = netdev_priv(dev);
- int32_t in_use;
+ int32_t skb_to_free;
+ int32_t undo;
int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
unsigned char *fpa_head;
@@ -120,15 +123,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
/*
- * Assume we're going to be able t osend this
- * packet. Fetch and increment the number of pending
- * packets for output.
+ * Fetch and increment the number of packets to be
+ * freed.
*/
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
FAU_NUM_PACKET_BUFFERS_TO_FREE,
0);
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- priv->fau + qos * 4, 1);
+ priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
}
/*
@@ -286,16 +289,30 @@ dont_put_skbuff_in_hw:
if (USE_ASYNC_IOBDMA) {
/* Get the number of skbuffs in use by the hardware */
CVMX_SYNCIOBDMA;
- in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
} else {
/* Get the number of skbuffs in use by the hardware */
- in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
buffers_to_free =
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
/*
+ * We try to claim MAX_SKB_TO_FREE buffers. If there were not
+ * that many available, we have to un-claim (undo) any that
+ * were in excess. If skb_to_free is positive we will free
+ * that many buffers.
+ */
+ undo = skb_to_free > 0 ?
+ MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+ MAX_SKB_TO_FREE : -skb_to_free;
+
+ /*
* If we're sending faster than the receive can free them then
* don't do the HW free.
*/
@@ -330,38 +347,31 @@ dont_put_skbuff_in_hw:
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
}
+ queue_it_up = 0;
if (unlikely(dropped)) {
dev_kfree_skb_any(skb);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
priv->stats.tx_dropped++;
} else {
if (USE_SKBUFFS_IN_HW) {
/* Put this packet on the queue to be freed later */
if (pko_command.s.dontfree)
- skb_queue_tail(&priv->tx_free_list[qos], skb);
- else {
+ queue_it_up = 1;
+ else
cvmx_fau_atomic_add32
(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
- }
} else {
/* Put this packet on the queue to be freed later */
- skb_queue_tail(&priv->tx_free_list[qos], skb);
+ queue_it_up = 1;
}
}
- /* Free skbuffs not in use by the hardware, possibly two at a time */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
+ if (queue_it_up) {
spin_lock(&priv->tx_free_list[qos].lock);
- /*
- * Check again now that we have the lock. It might
- * have changed.
- */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ __skb_queue_tail(&priv->tx_free_list[qos], skb);
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
spin_unlock(&priv->tx_free_list[qos].lock);
+ } else {
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
}
return 0;
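[Editorial note: as a closing illustration, here is a self-contained
user-space demonstration (a sketch using POSIX threads, not driver code)
of the property the commit message claims: two threads racing through
the claim logic split the pending buffers without overlap, whereas the
old in_use scheme could free one buffer too many.]

	/* Build: cc -std=c11 -pthread demo.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define MAX_SKB_TO_FREE 10

	static atomic_int fau = -14;	/* 14 transmits already completed */

	/* Same hypothetical helper as in the sketch above. */
	static int claim_skbs_to_free(void)
	{
		int skb_to_free = atomic_fetch_add(&fau, MAX_SKB_TO_FREE);
		int undo = skb_to_free > 0 ?
			MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
		if (undo > 0)
			atomic_fetch_sub(&fau, undo);
		return -skb_to_free > MAX_SKB_TO_FREE ?
			MAX_SKB_TO_FREE : -skb_to_free;
	}

	static void *worker(void *name)
	{
		int n = claim_skbs_to_free();
		printf("%s claimed %d buffers\n",
		       (const char *)name, n > 0 ? n : 0);
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;
		pthread_create(&t1, NULL, worker, "thread-1");
		pthread_create(&t2, NULL, worker, "thread-2");
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		/* Whatever the interleaving, the two claims sum to 14;
		 * the racy in_use scheme could have freed a 15th,
		 * still-in-flight buffer. */
		return 0;
	}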