author	Ben Hutchings <bhutchings@solarflare.com>	2012-12-20 18:48:20 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-03-28 12:06:01 -0700
commit	bbd3cfb8cbb326f32f1daec0ea6ffbf855a7ecc8 (patch)
tree	3542c319ffc731dbc351835afb114ac6d81a1dda /drivers/net
parent	16cec22e5099020edb0ba8b6ae8f1b011e2ec4d5 (diff)
sfc: Properly sync RX DMA buffer when it is not the last in the page
[ Upstream commit 3a68f19d7afb80f548d016effbc6ed52643a8085 ]

We may currently allocate two RX DMA buffers to a page, and only unmap
the page when the second is completed.  We do not sync the first RX
buffer to be completed; this can result in packet loss or corruption
if the last RX buffer completed in a NAPI poll is the first in a page
and is not DMA-coherent.  (In the middle of a NAPI poll, we will
handle the following RX completion and unmap the page *before* looking
at the content of the first buffer.)

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
[bwh: Backported to 3.0: adjust context]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/sfc/rx.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
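The patch below makes the unmap helper take the number of bytes actually used, so that a buffer which does not tear down the shared page mapping still gets its bytes synced back to the CPU. As a rough, self-contained illustration of that sync-versus-unmap pattern (not sfc code: the demo_* names are hypothetical stand-ins for efx_rx_buffer and efx_rx_page_state, and dma_unmap_page() stands in for the driver's pci_unmap_page() call):

/* Minimal sketch of the pattern, assuming one DMA-mapped page shared by
 * several RX buffers and tracked by a hypothetical refcounted page state. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct demo_page_state {
	unsigned int refcnt;	/* RX buffers still outstanding on this page */
	dma_addr_t dma_addr;	/* streaming mapping of the whole page */
	size_t mapped_len;	/* length of that mapping */
};

struct demo_rx_buffer {
	dma_addr_t dma_addr;	/* this buffer's slice of the page mapping */
	struct demo_page_state *state;
};

/* used_len == 0 means "just releasing, nothing was received" (teardown);
 * otherwise it is the number of bytes the NIC wrote into this buffer. */
static void demo_rx_buffer_done(struct device *dev,
				struct demo_rx_buffer *buf,
				unsigned int used_len)
{
	struct demo_page_state *state = buf->state;

	if (--state->refcnt == 0) {
		/* Last buffer sharing the page: unmapping syncs everything. */
		dma_unmap_page(dev, state->dma_addr, state->mapped_len,
			       DMA_FROM_DEVICE);
	} else if (used_len) {
		/* The page stays mapped for the remaining buffer(s), so make
		 * the bytes the device wrote visible to the CPU before use. */
		dma_sync_single_for_cpu(dev, buf->dma_addr, used_len,
					DMA_FROM_DEVICE);
	}
}

The key point mirrored from the patch is the else-if branch: callers that are about to read packet data pass the received length, while teardown paths pass 0 and skip the sync.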
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 4004fc2..ce6f53f 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -245,7 +245,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if (rx_buf->is_page && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -256,6 +257,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       PCI_DMA_FROMDEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!rx_buf->is_page && rx_buf->u.skb) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -278,7 +283,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -549,10 +554,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.