author     Michael Brown <mcb30@ipxe.org>    2020-11-26 12:25:02 +0000
committer  Michael Brown <mcb30@ipxe.org>    2020-11-28 20:26:28 +0000
commit     8d337ecdae3c6d555ea57996bc2280debd984a9c (patch)
tree       9a572ccfe77432a735a0df76f2a91e5fe93cefcf /src
parent     70e6e83243b77a3771756106871d6f945062a44b (diff)
download   ipxe-8d337ecdae3c6d555ea57996bc2280debd984a9c.tar.gz
[dma] Move I/O buffer DMA operations to iobuf.h
Include a potential DMA mapping within the definition of an I/O buffer, and
move all I/O buffer DMA mapping functions from dma.h to iobuf.h.  This avoids
the need for drivers to maintain a separate list of DMA mappings for each I/O
buffer that they may handle.

Network device drivers typically do not keep track of transmit I/O buffers,
since the network device core already maintains a transmit queue.  Drivers
will typically call netdev_tx_complete_next() to complete a transmission
without first obtaining the relevant I/O buffer pointer (and will rely on the
network device core automatically cancelling any pending transmissions when
the device is closed).

To allow this driver design approach to be retained, update the
netdev_tx_complete() family of functions to automatically perform the DMA
unmapping operation if required.  For symmetry, also update the netdev_rx()
family of functions to behave the same way.

As a further convenience for drivers, allow the network device core to
automatically perform DMA mapping on the transmit datapath before calling
the driver's transmit() method.  This avoids the need to introduce a mapping
error handling code path into the typically error-free transmit methods.

With these changes, the modifications required to update a typical network
device driver to use the new DMA API are fairly minimal:

- Allocate and free descriptor rings and similar coherent structures using
  dma_alloc()/dma_free() rather than malloc_phys()/free_phys()

- Allocate and free receive buffers using alloc_rx_iob()/free_rx_iob()
  rather than alloc_iob()/free_iob()

- Calculate DMA addresses using dma() or iob_dma() rather than virt_to_bus()

- Set a 64-bit DMA mask if needed using dma_set_mask_64bit() and thereafter
  eliminate checks on DMA address ranges

- Either record the DMA device in netdev->dma, or call iob_map_tx() as part
  of the transmit() method

- Ensure that debug messages use virt_to_phys() when displaying "hardware"
  addresses

Signed-off-by: Michael Brown <mcb30@ipxe.org>
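To make the receive-path portion of this checklist concrete, the fragment
below sketches the refill and teardown pattern that the converted drivers in
this patch follow.  It is a minimal illustration, not part of the patch: the
skel_* names, ring counters and rx_addr descriptor stand-in are hypothetical,
while alloc_rx_iob(), iob_dma() and free_rx_iob() are the interfaces added by
this commit.

/*
 * Illustrative sketch only: "skel" driver state and descriptor
 * bookkeeping are hypothetical placeholders; the I/O buffer DMA
 * helpers are those introduced by this commit.
 */
#include <stdint.h>
#include <ipxe/iobuf.h>

#define SKEL_NUM_RX_DESC 8
#define SKEL_RX_MAX_LEN 2048

struct skel_nic {
	/* DMA device (typically &pci->dma, also recorded in netdev->dma) */
	struct dma_device *dma;
	/* Receive I/O buffers awaiting completion */
	struct io_buffer *rx_iobuf[SKEL_NUM_RX_DESC];
	/* Producer and consumer counters */
	unsigned int rx_prod;
	unsigned int rx_cons;
	/* Stand-in for the hardware descriptor ring's buffer addresses */
	uint64_t rx_addr[SKEL_NUM_RX_DESC];
};

/* Refill receive ring using the new I/O buffer DMA helpers */
static void skel_refill_rx ( struct skel_nic *skel ) {
	struct io_buffer *iobuf;
	unsigned int rx_idx;

	while ( ( skel->rx_prod - skel->rx_cons ) < SKEL_NUM_RX_DESC ) {

		/* Allocate and map a receive buffer in a single step */
		iobuf = alloc_rx_iob ( SKEL_RX_MAX_LEN, skel->dma );
		if ( ! iobuf )
			break;

		/* Program the device-visible address from the mapping */
		rx_idx = ( skel->rx_prod++ % SKEL_NUM_RX_DESC );
		skel->rx_addr[rx_idx] = iob_dma ( iobuf );

		/* Record buffer so it can later be passed to netdev_rx() */
		skel->rx_iobuf[rx_idx] = iobuf;
	}
}

/* Discard unused receive buffers (e.g. when closing the device) */
static void skel_empty_rx ( struct skel_nic *skel ) {
	unsigned int i;

	for ( i = 0 ; i < SKEL_NUM_RX_DESC ; i++ ) {
		if ( skel->rx_iobuf[i] )
			free_rx_iob ( skel->rx_iobuf[i] );
		skel->rx_iobuf[i] = NULL;
	}
}

On the transmit side no explicit mapping call is needed at all once
netdev->dma is set: netdev_tx() maps the buffer before invoking the driver's
transmit() method, and the netdev_tx_complete()/netdev_tx_err() paths unmap
it again, as the netdevice.c hunks below show.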
Diffstat (limited to 'src')
-rw-r--r--  src/core/dma.c               |  41
-rw-r--r--  src/core/iobuf.c             |  45
-rw-r--r--  src/drivers/net/intel.c      | 120
-rw-r--r--  src/drivers/net/intel.h      |  46
-rw-r--r--  src/drivers/net/intelx.c     |  21
-rw-r--r--  src/drivers/net/intelxl.c    | 134
-rw-r--r--  src/drivers/net/intelxl.h    |  30
-rw-r--r--  src/drivers/net/intelxlvf.c  |  33
-rw-r--r--  src/drivers/net/intelxvf.c   |  21
-rw-r--r--  src/drivers/net/realtek.c    | 101
-rw-r--r--  src/drivers/net/realtek.h    |  24
-rw-r--r--  src/include/ipxe/dma.h       |  22
-rw-r--r--  src/include/ipxe/iobuf.h     |  55
-rw-r--r--  src/include/ipxe/netdevice.h |   6
-rw-r--r--  src/interface/efi/efi_pci.c  |   2
-rw-r--r--  src/net/netdevice.c          |  32
16 files changed, 342 insertions, 391 deletions
diff --git a/src/core/dma.c b/src/core/dma.c
index 561aec1e1..e5fa3f323 100644
--- a/src/core/dma.c
+++ b/src/core/dma.c
@@ -25,7 +25,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <assert.h>
#include <errno.h>
-#include <ipxe/iobuf.h>
#include <ipxe/dma.h>
/** @file
@@ -139,43 +138,3 @@ PROVIDE_DMAAPI ( op, dma_alloc, dma_op_alloc );
PROVIDE_DMAAPI ( op, dma_free, dma_op_free );
PROVIDE_DMAAPI ( op, dma_set_mask, dma_op_set_mask );
PROVIDE_DMAAPI_INLINE ( op, dma_phys );
-
-/******************************************************************************
- *
- * Utility functions
- *
- ******************************************************************************
- */
-
-/**
- * Allocate and map I/O buffer for receiving data from device
- *
- * @v dma DMA device
- * @v map DMA mapping to fill in
- * @v len Length of I/O buffer
- * @ret iobuf I/O buffer, or NULL on error
- */
-struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma,
- struct dma_mapping *map,
- size_t len ) {
- struct io_buffer *iobuf;
- int rc;
-
- /* Allocate I/O buffer */
- iobuf = alloc_iob ( len );
- if ( ! iobuf )
- goto err_alloc;
-
- /* Map I/O buffer */
- if ( ( rc = dma_map ( dma, map, virt_to_phys ( iobuf->data ),
- len, DMA_RX ) ) != 0 )
- goto err_map;
-
- return iobuf;
-
- dma_unmap ( map );
- err_map:
- free_iob ( iobuf );
- err_alloc:
- return NULL;
-}
diff --git a/src/core/iobuf.c b/src/core/iobuf.c
index 941bb3446..c9970bc76 100644
--- a/src/core/iobuf.c
+++ b/src/core/iobuf.c
@@ -110,6 +110,7 @@ struct io_buffer * alloc_iob_raw ( size_t len, size_t align, size_t offset ) {
}
/* Populate descriptor */
+ memset ( &iobuf->map, 0, sizeof ( iobuf->map ) );
iobuf->head = iobuf->data = iobuf->tail = data;
iobuf->end = ( data + len );
@@ -153,6 +154,7 @@ void free_iob ( struct io_buffer *iobuf ) {
assert ( iobuf->head <= iobuf->data );
assert ( iobuf->data <= iobuf->tail );
assert ( iobuf->tail <= iobuf->end );
+ assert ( ! dma_mapped ( &iobuf->map ) );
/* Free buffer */
len = ( iobuf->end - iobuf->head );
@@ -170,6 +172,49 @@ void free_iob ( struct io_buffer *iobuf ) {
}
/**
+ * Allocate and map I/O buffer for receive DMA
+ *
+ * @v len Length of I/O buffer
+ * @v dma DMA device
+ * @ret iobuf I/O buffer, or NULL on error
+ */
+struct io_buffer * alloc_rx_iob ( size_t len, struct dma_device *dma ) {
+ struct io_buffer *iobuf;
+ int rc;
+
+ /* Allocate I/O buffer */
+ iobuf = alloc_iob ( len );
+ if ( ! iobuf )
+ goto err_alloc;
+
+ /* Map I/O buffer */
+ if ( ( rc = iob_map_rx ( iobuf, dma ) ) != 0 )
+ goto err_map;
+
+ return iobuf;
+
+ iob_unmap ( iobuf );
+ err_map:
+ free_iob ( iobuf );
+ err_alloc:
+ return NULL;
+}
+
+/**
+ * Unmap and free I/O buffer for receive DMA
+ *
+ * @v iobuf I/O buffer
+ */
+void free_rx_iob ( struct io_buffer *iobuf ) {
+
+ /* Unmap I/O buffer */
+ iob_unmap ( iobuf );
+
+ /* Free I/O buffer */
+ free_iob ( iobuf );
+}
+
+/**
* Ensure I/O buffer has sufficient headroom
*
* @v iobuf I/O buffer
diff --git a/src/drivers/net/intel.c b/src/drivers/net/intel.c
index 93c5fd1f6..83492961f 100644
--- a/src/drivers/net/intel.c
+++ b/src/drivers/net/intel.c
@@ -568,34 +568,30 @@ void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
void intel_refill_rx ( struct intel_nic *intel ) {
struct intel_descriptor *rx;
struct io_buffer *iobuf;
- struct dma_mapping *map;
unsigned int rx_idx;
unsigned int rx_tail;
unsigned int refilled = 0;
/* Refill ring */
- while ( ( intel->rx.ring.prod -
- intel->rx.ring.cons ) < INTEL_RX_FILL ) {
-
- /* Get next receive descriptor */
- rx_idx = ( intel->rx.ring.prod % INTEL_NUM_RX_DESC );
- rx = &intel->rx.ring.desc[rx_idx];
- map = &intel->rx.map[rx_idx];
- assert ( intel->rx.iobuf[rx_idx] == NULL );
+ while ( ( intel->rx.prod - intel->rx.cons ) < INTEL_RX_FILL ) {
/* Allocate I/O buffer */
- iobuf = dma_alloc_rx_iob ( intel->dma, map, INTEL_RX_MAX_LEN );
+ iobuf = alloc_rx_iob ( INTEL_RX_MAX_LEN, intel->dma );
if ( ! iobuf ) {
/* Wait for next refill */
break;
}
- intel->rx.iobuf[rx_idx] = iobuf;
- /* Update producer index */
- intel->rx.ring.prod++;
+ /* Get next receive descriptor */
+ rx_idx = ( intel->rx.prod++ % INTEL_NUM_RX_DESC );
+ rx = &intel->rx.desc[rx_idx];
/* Populate receive descriptor */
- intel->rx.ring.describe ( rx, dma ( map, iobuf->data ), 0 );
+ intel->rx.describe ( rx, iob_dma ( iobuf ), 0 );
+
+ /* Record I/O buffer */
+ assert ( intel->rx_iobuf[rx_idx] == NULL );
+ intel->rx_iobuf[rx_idx] = iobuf;
DBGC2 ( intel, "INTEL %p RX %d is [%lx,%lx)\n",
intel, rx_idx, virt_to_phys ( iobuf->data ),
@@ -606,40 +602,27 @@ void intel_refill_rx ( struct intel_nic *intel ) {
/* Push descriptors to card, if applicable */
if ( refilled ) {
wmb();
- rx_tail = ( intel->rx.ring.prod % INTEL_NUM_RX_DESC );
+ rx_tail = ( intel->rx.prod % INTEL_NUM_RX_DESC );
profile_start ( &intel_vm_refill_profiler );
- writel ( rx_tail,
- intel->regs + intel->rx.ring.reg + INTEL_xDT );
+ writel ( rx_tail, intel->regs + intel->rx.reg + INTEL_xDT );
profile_stop ( &intel_vm_refill_profiler );
profile_exclude ( &intel_vm_refill_profiler );
}
}
/**
- * Flush unused I/O buffers
+ * Discard unused receive I/O buffers
*
* @v intel Intel device
- *
- * Discard any unused receive I/O buffers and unmap any incomplete
- * transmit I/O buffers.
*/
-void intel_flush ( struct intel_nic *intel ) {
+void intel_empty_rx ( struct intel_nic *intel ) {
unsigned int i;
- unsigned int tx_idx;
/* Discard unused receive buffers */
for ( i = 0 ; i < INTEL_NUM_RX_DESC ; i++ ) {
- if ( intel->rx.iobuf[i] ) {
- dma_unmap ( &intel->rx.map[i] );
- free_iob ( intel->rx.iobuf[i] );
- }
- intel->rx.iobuf[i] = NULL;
- }
-
- /* Unmap incomplete transmit buffers */
- for ( i = intel->tx.ring.cons ; i != intel->tx.ring.prod ; i++ ) {
- tx_idx = ( i % INTEL_NUM_TX_DESC );
- dma_unmap ( &intel->tx.map[tx_idx] );
+ if ( intel->rx_iobuf[i] )
+ free_rx_iob ( intel->rx_iobuf[i] );
+ intel->rx_iobuf[i] = NULL;
}
}
@@ -670,11 +653,11 @@ static int intel_open ( struct net_device *netdev ) {
}
/* Create transmit descriptor ring */
- if ( ( rc = intel_create_ring ( intel, &intel->tx.ring ) ) != 0 )
+ if ( ( rc = intel_create_ring ( intel, &intel->tx ) ) != 0 )
goto err_create_tx;
/* Create receive descriptor ring */
- if ( ( rc = intel_create_ring ( intel, &intel->rx.ring ) ) != 0 )
+ if ( ( rc = intel_create_ring ( intel, &intel->rx ) ) != 0 )
goto err_create_rx;
/* Program MAC address */
@@ -713,9 +696,9 @@ static int intel_open ( struct net_device *netdev ) {
return 0;
- intel_destroy_ring ( intel, &intel->rx.ring );
+ intel_destroy_ring ( intel, &intel->rx );
err_create_rx:
- intel_destroy_ring ( intel, &intel->tx.ring );
+ intel_destroy_ring ( intel, &intel->tx );
err_create_tx:
return rc;
}
@@ -735,13 +718,13 @@ static void intel_close ( struct net_device *netdev ) {
writel ( 0, intel->regs + INTEL_TCTL );
/* Destroy receive descriptor ring */
- intel_destroy_ring ( intel, &intel->rx.ring );
+ intel_destroy_ring ( intel, &intel->rx );
- /* Flush unused buffers */
- intel_flush ( intel );
+ /* Discard any unused receive buffers */
+ intel_empty_rx ( intel );
/* Destroy transmit descriptor ring */
- intel_destroy_ring ( intel, &intel->tx.ring );
+ intel_destroy_ring ( intel, &intel->tx );
/* Reset the NIC, to flush the transmit and receive FIFOs */
intel_reset ( intel );
@@ -757,37 +740,27 @@ static void intel_close ( struct net_device *netdev ) {
int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
struct intel_nic *intel = netdev->priv;
struct intel_descriptor *tx;
- struct dma_mapping *map;
unsigned int tx_idx;
unsigned int tx_tail;
size_t len;
- int rc;
/* Get next transmit descriptor */
- if ( ( intel->tx.ring.prod - intel->tx.ring.cons ) >= INTEL_TX_FILL ) {
+ if ( ( intel->tx.prod - intel->tx.cons ) >= INTEL_TX_FILL ) {
DBGC ( intel, "INTEL %p out of transmit descriptors\n", intel );
return -ENOBUFS;
}
- tx_idx = ( intel->tx.ring.prod % INTEL_NUM_TX_DESC );
- tx = &intel->tx.ring.desc[tx_idx];
- map = &intel->tx.map[tx_idx];
-
- /* Map I/O buffer */
- if ( ( rc = dma_map_tx_iob ( intel->dma, map, iobuf ) ) != 0 )
- return rc;
-
- /* Update producer index */
- intel->tx.ring.prod++;
+ tx_idx = ( intel->tx.prod++ % INTEL_NUM_TX_DESC );
+ tx_tail = ( intel->tx.prod % INTEL_NUM_TX_DESC );
+ tx = &intel->tx.desc[tx_idx];
/* Populate transmit descriptor */
len = iob_len ( iobuf );
- intel->tx.ring.describe ( tx, dma ( map, iobuf->data ), len );
+ intel->tx.describe ( tx, iob_dma ( iobuf ), len );
wmb();
/* Notify card that there are packets ready to transmit */
profile_start ( &intel_vm_tx_profiler );
- tx_tail = ( intel->tx.ring.prod % INTEL_NUM_TX_DESC );
- writel ( tx_tail, intel->regs + intel->tx.ring.reg + INTEL_xDT );
+ writel ( tx_tail, intel->regs + intel->tx.reg + INTEL_xDT );
profile_stop ( &intel_vm_tx_profiler );
profile_exclude ( &intel_vm_tx_profiler );
@@ -809,11 +782,11 @@ void intel_poll_tx ( struct net_device *netdev ) {
unsigned int tx_idx;
/* Check for completed packets */
- while ( intel->tx.ring.cons != intel->tx.ring.prod ) {
+ while ( intel->tx.cons != intel->tx.prod ) {
/* Get next transmit descriptor */
- tx_idx = ( intel->tx.ring.cons % INTEL_NUM_TX_DESC );
- tx = &intel->tx.ring.desc[tx_idx];
+ tx_idx = ( intel->tx.cons % INTEL_NUM_TX_DESC );
+ tx = &intel->tx.desc[tx_idx];
/* Stop if descriptor is still in use */
if ( ! ( tx->status & cpu_to_le32 ( INTEL_DESC_STATUS_DD ) ) )
@@ -821,12 +794,9 @@ void intel_poll_tx ( struct net_device *netdev ) {
DBGC2 ( intel, "INTEL %p TX %d complete\n", intel, tx_idx );
- /* Unmap I/O buffer */
- dma_unmap ( &intel->tx.map[tx_idx] );
-
/* Complete TX descriptor */
netdev_tx_complete_next ( netdev );
- intel->tx.ring.cons++;
+ intel->tx.cons++;
}
}
@@ -843,22 +813,19 @@ void intel_poll_rx ( struct net_device *netdev ) {
size_t len;
/* Check for received packets */
- while ( intel->rx.ring.cons != intel->rx.ring.prod ) {
+ while ( intel->rx.cons != intel->rx.prod ) {
/* Get next receive descriptor */
- rx_idx = ( intel->rx.ring.cons % INTEL_NUM_RX_DESC );
- rx = &intel->rx.ring.desc[rx_idx];
+ rx_idx = ( intel->rx.cons % INTEL_NUM_RX_DESC );
+ rx = &intel->rx.desc[rx_idx];
/* Stop if descriptor is still in use */
if ( ! ( rx->status & cpu_to_le32 ( INTEL_DESC_STATUS_DD ) ) )
return;
- /* Unmap I/O buffer */
- dma_unmap ( &intel->rx.map[rx_idx] );
-
/* Populate I/O buffer */
- iobuf = intel->rx.iobuf[rx_idx];
- intel->rx.iobuf[rx_idx] = NULL;
+ iobuf = intel->rx_iobuf[rx_idx];
+ intel->rx_iobuf[rx_idx] = NULL;
len = le16_to_cpu ( rx->length );
iob_put ( iobuf, len );
@@ -873,7 +840,7 @@ void intel_poll_rx ( struct net_device *netdev ) {
intel, rx_idx, len );
netdev_rx ( netdev, iobuf );
}
- intel->rx.ring.cons++;
+ intel->rx.cons++;
}
}
@@ -981,9 +948,9 @@ static int intel_probe ( struct pci_device *pci ) {
memset ( intel, 0, sizeof ( *intel ) );
intel->port = PCI_FUNC ( pci->busdevfn );
intel->flags = pci->id->driver_data;
- intel_init_ring ( &intel->tx.ring, INTEL_NUM_TX_DESC, INTEL_TD,
+ intel_init_ring ( &intel->tx, INTEL_NUM_TX_DESC, INTEL_TD,
intel_describe_tx );
- intel_init_ring ( &intel->rx.ring, INTEL_NUM_RX_DESC, INTEL_RD,
+ intel_init_ring ( &intel->rx, INTEL_NUM_RX_DESC, INTEL_RD,
intel_describe_rx );
/* Fix up PCI device */
@@ -999,6 +966,7 @@ static int intel_probe ( struct pci_device *pci ) {
/* Configure DMA */
intel->dma = &pci->dma;
dma_set_mask_64bit ( intel->dma );
+ netdev->dma = intel->dma;
/* Reset the NIC */
if ( ( rc = intel_reset ( intel ) ) != 0 )
diff --git a/src/drivers/net/intel.h b/src/drivers/net/intel.h
index 731b5f225..4f51a80f6 100644
--- a/src/drivers/net/intel.h
+++ b/src/drivers/net/intel.h
@@ -276,24 +276,6 @@ intel_init_mbox ( struct intel_mailbox *mbox, unsigned int ctrl,
mbox->mem = mem;
}
-/** Transmit ring */
-struct intel_tx_ring {
- /** Descriptor ring */
- struct intel_ring ring;
- /** DMA mappings */
- struct dma_mapping map[INTEL_NUM_TX_DESC];
-};
-
-/** Receive ring */
-struct intel_rx_ring {
- /** Descriptor ring */
- struct intel_ring ring;
- /** I/O buffers */
- struct io_buffer *iobuf[INTEL_NUM_RX_DESC];
- /** DMA mappings */
- struct dma_mapping map[INTEL_NUM_RX_DESC];
-};
-
/** An Intel network card */
struct intel_nic {
/** Registers */
@@ -317,10 +299,12 @@ struct intel_nic {
/** Mailbox */
struct intel_mailbox mbox;
- /** Transmit ring */
- struct intel_tx_ring tx;
- /** Receive ring */
- struct intel_rx_ring rx;
+ /** Transmit descriptor ring */
+ struct intel_ring tx;
+ /** Receive descriptor ring */
+ struct intel_ring rx;
+ /** Receive I/O buffers */
+ struct io_buffer *rx_iobuf[INTEL_NUM_RX_DESC];
};
/** Driver flags */
@@ -349,14 +333,14 @@ static inline void intel_diag ( struct intel_nic *intel ) {
DBGC ( intel, "INTEL %p TX %04x(%02x)/%04x(%02x) "
"RX %04x(%02x)/%04x(%02x)\n", intel,
- ( intel->tx.ring.cons & 0xffff ),
- readl ( intel->regs + intel->tx.ring.reg + INTEL_xDH ),
- ( intel->tx.ring.prod & 0xffff ),
- readl ( intel->regs + intel->tx.ring.reg + INTEL_xDT ),
- ( intel->rx.ring.cons & 0xffff ),
- readl ( intel->regs + intel->rx.ring.reg + INTEL_xDH ),
- ( intel->rx.ring.prod & 0xffff ),
- readl ( intel->regs + intel->rx.ring.reg + INTEL_xDT ) );
+ ( intel->tx.cons & 0xffff ),
+ readl ( intel->regs + intel->tx.reg + INTEL_xDH ),
+ ( intel->tx.prod & 0xffff ),
+ readl ( intel->regs + intel->tx.reg + INTEL_xDT ),
+ ( intel->rx.cons & 0xffff ),
+ readl ( intel->regs + intel->rx.reg + INTEL_xDH ),
+ ( intel->rx.prod & 0xffff ),
+ readl ( intel->regs + intel->rx.reg + INTEL_xDT ) );
}
extern void intel_describe_tx ( struct intel_descriptor *tx,
@@ -371,7 +355,7 @@ extern int intel_create_ring ( struct intel_nic *intel,
extern void intel_destroy_ring ( struct intel_nic *intel,
struct intel_ring *ring );
extern void intel_refill_rx ( struct intel_nic *intel );
-extern void intel_flush ( struct intel_nic *intel );
+extern void intel_empty_rx ( struct intel_nic *intel );
extern int intel_transmit ( struct net_device *netdev,
struct io_buffer *iobuf );
extern void intel_poll_tx ( struct net_device *netdev );
diff --git a/src/drivers/net/intelx.c b/src/drivers/net/intelx.c
index 364ec76c5..ccf6b0648 100644
--- a/src/drivers/net/intelx.c
+++ b/src/drivers/net/intelx.c
@@ -185,11 +185,11 @@ static int intelx_open ( struct net_device *netdev ) {
int rc;
/* Create transmit descriptor ring */
- if ( ( rc = intel_create_ring ( intel, &intel->tx.ring ) ) != 0 )
+ if ( ( rc = intel_create_ring ( intel, &intel->tx ) ) != 0 )
goto err_create_tx;
/* Create receive descriptor ring */
- if ( ( rc = intel_create_ring ( intel, &intel->rx.ring ) ) != 0 )
+ if ( ( rc = intel_create_ring ( intel, &intel->rx ) ) != 0 )
goto err_create_rx;
/* Program MAC address */
@@ -263,9 +263,9 @@ static int intelx_open ( struct net_device *netdev ) {
return 0;
- intel_destroy_ring ( intel, &intel->rx.ring );
+ intel_destroy_ring ( intel, &intel->rx );
err_create_rx:
- intel_destroy_ring ( intel, &intel->tx.ring );
+ intel_destroy_ring ( intel, &intel->tx );
err_create_tx:
return rc;
}
@@ -291,13 +291,13 @@ static void intelx_close ( struct net_device *netdev ) {
writel ( dmatxctl, intel->regs + INTELX_DMATXCTL );
/* Destroy receive descriptor ring */
- intel_destroy_ring ( intel, &intel->rx.ring );
+ intel_destroy_ring ( intel, &intel->rx );
- /* Flush unused buffers */
- intel_flush ( intel );
+ /* Discard any unused receive buffers */
+ intel_empty_rx ( intel );
/* Destroy transmit descriptor ring */
- intel_destroy_ring ( intel, &intel->tx.ring );
+ intel_destroy_ring ( intel, &intel->tx );
/* Reset the NIC, to flush the transmit and receive FIFOs */
intelx_reset ( intel );
@@ -395,9 +395,9 @@ static int intelx_probe ( struct pci_device *pci ) {
netdev->dev = &pci->dev;
memset ( intel, 0, sizeof ( *intel ) );
intel->port = PCI_FUNC ( pci->busdevfn );
- intel_init_ring ( &intel->tx.ring, INTEL_NUM_TX_DESC, INTELX_TD,
+ intel_init_ring ( &intel->tx, INTEL_NUM_TX_DESC, INTELX_TD,
intel_describe_tx );
- intel_init_ring ( &intel->rx.ring, INTEL_NUM_RX_DESC, INTELX_RD,
+ intel_init_ring ( &intel->rx, INTEL_NUM_RX_DESC, INTELX_RD,
intel_describe_rx );
/* Fix up PCI device */
@@ -413,6 +413,7 @@ static int intelx_probe ( struct pci_device *pci ) {
/* Configure DMA */
intel->dma = &pci->dma;
dma_set_mask_64bit ( intel->dma );
+ netdev->dma = intel->dma;
/* Reset the NIC */
if ( ( rc = intelx_reset ( intel ) ) != 0 )
diff --git a/src/drivers/net/intelxl.c b/src/drivers/net/intelxl.c
index 5de432a6a..ac9e37c5a 100644
--- a/src/drivers/net/intelxl.c
+++ b/src/drivers/net/intelxl.c
@@ -1306,36 +1306,32 @@ static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
struct intelxl_rx_data_descriptor *rx;
struct io_buffer *iobuf;
- struct dma_mapping *map;
unsigned int rx_idx;
unsigned int rx_tail;
unsigned int refilled = 0;
/* Refill ring */
- while ( ( intelxl->rx.ring.prod -
- intelxl->rx.ring.cons ) < INTELXL_RX_FILL ) {
-
- /* Get next receive descriptor */
- rx_idx = ( intelxl->rx.ring.prod % INTELXL_RX_NUM_DESC );
- rx = &intelxl->rx.ring.desc.rx[rx_idx].data;
- map = &intelxl->rx.map[rx_idx];
- assert ( intelxl->rx.iobuf[rx_idx] == NULL );
+ while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {
/* Allocate I/O buffer */
- iobuf = dma_alloc_rx_iob ( intelxl->dma, map, intelxl->mfs );
+ iobuf = alloc_rx_iob ( intelxl->mfs, intelxl->dma );
if ( ! iobuf ) {
/* Wait for next refill */
break;
}
- intelxl->rx.iobuf[rx_idx] = iobuf;
- /* Update producer index */
- intelxl->rx.ring.prod++;
+ /* Get next receive descriptor */
+ rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
+ rx = &intelxl->rx.desc.rx[rx_idx].data;
/* Populate receive descriptor */
- rx->address = cpu_to_le64 ( dma ( map, iobuf->data ) );
+ rx->address = cpu_to_le64 ( iob_dma ( iobuf ) );
rx->flags = 0;
+ /* Record I/O buffer */
+ assert ( intelxl->rx_iobuf[rx_idx] == NULL );
+ intelxl->rx_iobuf[rx_idx] = iobuf;
+
DBGC2 ( intelxl, "INTELXL %p RX %d is [%08lx,%08lx)\n",
intelxl, rx_idx, virt_to_phys ( iobuf->data ),
( virt_to_phys ( iobuf->data ) + intelxl->mfs ) );
@@ -1345,36 +1341,24 @@ static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
/* Push descriptors to card, if applicable */
if ( refilled ) {
wmb();
- rx_tail = ( intelxl->rx.ring.prod % INTELXL_RX_NUM_DESC );
- writel ( rx_tail, ( intelxl->regs + intelxl->rx.ring.tail ) );
+ rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
+ writel ( rx_tail, ( intelxl->regs + intelxl->rx.tail ) );
}
}
/**
- * Flush unused I/O buffers
+ * Discard unused receive I/O buffers
*
* @v intelxl Intel device
- *
- * Discard any unused receive I/O buffers and unmap any incomplete
- * transmit I/O buffers.
*/
-void intelxl_flush ( struct intelxl_nic *intelxl ) {
+void intelxl_empty_rx ( struct intelxl_nic *intelxl ) {
unsigned int i;
- unsigned int tx_idx;
/* Discard any unused receive buffers */
for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
- if ( intelxl->rx.iobuf[i] ) {
- dma_unmap ( &intelxl->rx.map[i] );
- free_iob ( intelxl->rx.iobuf[i] );
- }
- intelxl->rx.iobuf[i] = NULL;
- }
-
- /* Unmap incomplete transmit buffers */
- for ( i = intelxl->tx.ring.cons ; i != intelxl->tx.ring.prod ; i++ ) {
- tx_idx = ( i % INTELXL_TX_NUM_DESC );
- dma_unmap ( &intelxl->tx.map[tx_idx] );
+ if ( intelxl->rx_iobuf[i] )
+ free_rx_iob ( intelxl->rx_iobuf[i] );
+ intelxl->rx_iobuf[i] = NULL;
}
}
@@ -1415,7 +1399,7 @@ static int intelxl_open ( struct net_device *netdev ) {
/* Associate transmit queue to PF */
writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
- ( intelxl->regs + intelxl->tx.ring.reg + INTELXL_QXX_CTL ) );
+ ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );
/* Clear transmit pre queue disable */
queue = ( intelxl->base + intelxl->queue );
@@ -1427,11 +1411,11 @@ static int intelxl_open ( struct net_device *netdev ) {
writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );
/* Create receive descriptor ring */
- if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx.ring ) ) != 0 )
+ if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
goto err_create_rx;
/* Create transmit descriptor ring */
- if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx.ring ) ) != 0 )
+ if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
goto err_create_tx;
/* Fill receive ring */
@@ -1449,9 +1433,9 @@ static int intelxl_open ( struct net_device *netdev ) {
INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
- intelxl_destroy_ring ( intelxl, &intelxl->tx.ring );
+ intelxl_destroy_ring ( intelxl, &intelxl->tx );
err_create_tx:
- intelxl_destroy_ring ( intelxl, &intelxl->rx.ring );
+ intelxl_destroy_ring ( intelxl, &intelxl->rx );
err_create_rx:
return rc;
}
@@ -1479,13 +1463,13 @@ static void intelxl_close ( struct net_device *netdev ) {
udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
/* Destroy transmit descriptor ring */
- intelxl_destroy_ring ( intelxl, &intelxl->tx.ring );
+ intelxl_destroy_ring ( intelxl, &intelxl->tx );
/* Destroy receive descriptor ring */
- intelxl_destroy_ring ( intelxl, &intelxl->rx.ring );
+ intelxl_destroy_ring ( intelxl, &intelxl->rx );
- /* Flush unused buffers */
- intelxl_flush ( intelxl );
+ /* Discard any unused receive buffers */
+ intelxl_empty_rx ( intelxl );
}
/**
@@ -1498,41 +1482,30 @@ static void intelxl_close ( struct net_device *netdev ) {
int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
struct intelxl_nic *intelxl = netdev->priv;
struct intelxl_tx_data_descriptor *tx;
- struct dma_mapping *map;
unsigned int tx_idx;
unsigned int tx_tail;
size_t len;
- int rc;
/* Get next transmit descriptor */
- if ( ( intelxl->tx.ring.prod -
- intelxl->tx.ring.cons ) >= INTELXL_TX_FILL ) {
+ if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
intelxl );
return -ENOBUFS;
}
- tx_idx = ( intelxl->tx.ring.prod % INTELXL_TX_NUM_DESC );
- tx = &intelxl->tx.ring.desc.tx[tx_idx].data;
- map = &intelxl->tx.map[tx_idx];
-
- /* Map I/O buffer */
- if ( ( rc = dma_map_tx_iob ( intelxl->dma, map, iobuf ) ) != 0 )
- return rc;
-
- /* Update producer index */
- intelxl->tx.ring.prod++;
+ tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
+ tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
+ tx = &intelxl->tx.desc.tx[tx_idx].data;
/* Populate transmit descriptor */
len = iob_len ( iobuf );
- tx->address = cpu_to_le64 ( dma ( map, iobuf->data ) );
+ tx->address = cpu_to_le64 ( iob_dma ( iobuf ) );
tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
wmb();
/* Notify card that there are packets ready to transmit */
- tx_tail = ( intelxl->tx.ring.prod % INTELXL_TX_NUM_DESC );
- writel ( tx_tail, ( intelxl->regs + intelxl->tx.ring.tail ) );
+ writel ( tx_tail, ( intelxl->regs + intelxl->tx.tail ) );
DBGC2 ( intelxl, "INTELXL %p TX %d is [%08lx,%08lx)\n",
intelxl, tx_idx, virt_to_phys ( iobuf->data ),
@@ -1551,11 +1524,11 @@ static void intelxl_poll_tx ( struct net_device *netdev ) {
unsigned int tx_idx;
/* Check for completed packets */
- while ( intelxl->tx.ring.cons != intelxl->tx.ring.prod ) {
+ while ( intelxl->tx.cons != intelxl->tx.prod ) {
/* Get next transmit descriptor */
- tx_idx = ( intelxl->tx.ring.cons % INTELXL_TX_NUM_DESC );
- tx_wb = &intelxl->tx.ring.desc.tx[tx_idx].wb;
+ tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
+ tx_wb = &intelxl->tx.desc.tx[tx_idx].wb;
/* Stop if descriptor is still in use */
if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
@@ -1563,12 +1536,9 @@ static void intelxl_poll_tx ( struct net_device *netdev ) {
DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
intelxl, tx_idx );
- /* Unmap I/O buffer */
- dma_unmap ( &intelxl->tx.map[tx_idx] );
-
/* Complete TX descriptor */
netdev_tx_complete_next ( netdev );
- intelxl->tx.ring.cons++;
+ intelxl->tx.cons++;
}
}
@@ -1586,22 +1556,19 @@ static void intelxl_poll_rx ( struct net_device *netdev ) {
size_t len;
/* Check for received packets */
- while ( intelxl->rx.ring.cons != intelxl->rx.ring.prod ) {
+ while ( intelxl->rx.cons != intelxl->rx.prod ) {
/* Get next receive descriptor */
- rx_idx = ( intelxl->rx.ring.cons % INTELXL_RX_NUM_DESC );
- rx_wb = &intelxl->rx.ring.desc.rx[rx_idx].wb;
+ rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
+ rx_wb = &intelxl->rx.desc.rx[rx_idx].wb;
/* Stop if descriptor is still in use */
if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
return;
- /* Unmap I/O buffer */
- dma_unmap ( &intelxl->rx.map[rx_idx] );
-
/* Populate I/O buffer */
- iobuf = intelxl->rx.iobuf[rx_idx];
- intelxl->rx.iobuf[rx_idx] = NULL;
+ iobuf = intelxl->rx_iobuf[rx_idx];
+ intelxl->rx_iobuf[rx_idx] = NULL;
len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
iob_put ( iobuf, len );
@@ -1623,7 +1590,7 @@ static void intelxl_poll_rx ( struct net_device *netdev ) {
"%zd)\n", intelxl, rx_idx, len );
vlan_netdev_rx ( netdev, tag, iobuf );
}
- intelxl->rx.ring.cons++;
+ intelxl->rx.cons++;
}
}
@@ -1710,11 +1677,11 @@ static int intelxl_probe ( struct pci_device *pci ) {
&intelxl_admin_offsets );
intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
&intelxl_admin_offsets );
- intelxl_init_ring ( &intelxl->tx.ring, INTELXL_TX_NUM_DESC,
- sizeof ( intelxl->tx.ring.desc.tx[0] ),
+ intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
+ sizeof ( intelxl->tx.desc.tx[0] ),
intelxl_context_tx );
- intelxl_init_ring ( &intelxl->rx.ring, INTELXL_RX_NUM_DESC,
- sizeof ( intelxl->rx.ring.desc.rx[0] ),
+ intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
+ sizeof ( intelxl->rx.desc.rx[0] ),
intelxl_context_rx );
/* Fix up PCI device */
@@ -1730,6 +1697,7 @@ static int intelxl_probe ( struct pci_device *pci ) {
/* Configure DMA */
intelxl->dma = &pci->dma;
dma_set_mask_64bit ( intelxl->dma );
+ netdev->dma = intelxl->dma;
/* Reset the NIC */
if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
@@ -1775,10 +1743,10 @@ static int intelxl_probe ( struct pci_device *pci ) {
goto err_admin_promisc;
/* Configure queue register addresses */
- intelxl->tx.ring.reg = INTELXL_QTX ( intelxl->queue );
- intelxl->tx.ring.tail = ( intelxl->tx.ring.reg + INTELXL_QXX_TAIL );
- intelxl->rx.ring.reg = INTELXL_QRX ( intelxl->queue );
- intelxl->rx.ring.tail = ( intelxl->rx.ring.reg + INTELXL_QXX_TAIL );
+ intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
+ intelxl->tx.tail = ( intelxl->tx.reg + INTELXL_QXX_TAIL );
+ intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );
+ intelxl->rx.tail = ( intelxl->rx.reg + INTELXL_QXX_TAIL );
/* Configure interrupt causes */
writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
diff --git a/src/drivers/net/intelxl.h b/src/drivers/net/intelxl.h
index cffc0da96..a4a776d28 100644
--- a/src/drivers/net/intelxl.h
+++ b/src/drivers/net/intelxl.h
@@ -1030,24 +1030,6 @@ union intelxl_receive_address {
uint8_t raw[ETH_ALEN];
};
-/** Transmit ring */
-struct intelxl_tx_ring {
- /** Descriptor ring */
- struct intelxl_ring ring;
- /** DMA mappings */
- struct dma_mapping map[INTELXL_TX_NUM_DESC];
-};
-
-/** Receive ring */
-struct intelxl_rx_ring {
- /** Descriptor ring */
- struct intelxl_ring ring;
- /** I/O buffers */
- struct io_buffer *iobuf[INTELXL_RX_NUM_DESC];
- /** DMA mappings */
- struct dma_mapping map[INTELXL_RX_NUM_DESC];
-};
-
/** MSI-X interrupt */
struct intelxl_msix {
/** PCI capability */
@@ -1098,10 +1080,12 @@ struct intelxl_nic {
/** Current VF event data buffer */
union intelxl_admin_buffer vbuf;
- /** Transmit ring */
- struct intelxl_tx_ring tx;
- /** Receive ring */
- struct intelxl_rx_ring rx;
+ /** Transmit descriptor ring */
+ struct intelxl_ring tx;
+ /** Receive descriptor ring */
+ struct intelxl_ring rx;
+ /** Receive I/O buffers */
+ struct io_buffer *rx_iobuf[INTELXL_RX_NUM_DESC];
};
extern int intelxl_msix_enable ( struct intelxl_nic *intelxl,
@@ -1121,7 +1105,7 @@ extern int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
struct intelxl_ring *ring );
extern void intelxl_free_ring ( struct intelxl_nic *intelxl,
struct intelxl_ring *ring );
-extern void intelxl_flush ( struct intelxl_nic *intelxl );
+extern void intelxl_empty_rx ( struct intelxl_nic *intelxl );
extern int intelxl_transmit ( struct net_device *netdev,
struct io_buffer *iobuf );
extern void intelxl_poll ( struct net_device *netdev );
diff --git a/src/drivers/net/intelxlvf.c b/src/drivers/net/intelxlvf.c
index f944b4daa..752de7815 100644
--- a/src/drivers/net/intelxlvf.c
+++ b/src/drivers/net/intelxlvf.c
@@ -380,14 +380,14 @@ static int intelxlvf_admin_configure ( struct net_device *netdev ) {
buf->cfg.count = cpu_to_le16 ( 1 );
buf->cfg.tx.vsi = cpu_to_le16 ( intelxl->vsi );
buf->cfg.tx.count = cpu_to_le16 ( INTELXL_TX_NUM_DESC );
- buf->cfg.tx.base = cpu_to_le64 ( dma ( &intelxl->tx.ring.map,
- intelxl->tx.ring.desc.raw ) );
+ buf->cfg.tx.base = cpu_to_le64 ( dma ( &intelxl->tx.map,
+ intelxl->tx.desc.raw ) );
buf->cfg.rx.vsi = cpu_to_le16 ( intelxl->vsi );
buf->cfg.rx.count = cpu_to_le32 ( INTELXL_RX_NUM_DESC );
buf->cfg.rx.len = cpu_to_le32 ( intelxl->mfs );
buf->cfg.rx.mfs = cpu_to_le32 ( intelxl->mfs );
- buf->cfg.rx.base = cpu_to_le64 ( dma ( &intelxl->rx.ring.map,
- intelxl->rx.ring.desc.raw ) );
+ buf->cfg.rx.base = cpu_to_le64 ( dma ( &intelxl->rx.map,
+ intelxl->rx.desc.raw ) );
/* Issue command */
if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 )
@@ -501,11 +501,11 @@ static int intelxlvf_open ( struct net_device *netdev ) {
INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );
/* Allocate transmit descriptor ring */
- if ( ( rc = intelxl_alloc_ring ( intelxl, &intelxl->tx.ring ) ) != 0 )
+ if ( ( rc = intelxl_alloc_ring ( intelxl, &intelxl->tx ) ) != 0 )
goto err_alloc_tx;
/* Allocate receive descriptor ring */
- if ( ( rc = intelxl_alloc_ring ( intelxl, &intelxl->rx.ring ) ) != 0 )
+ if ( ( rc = intelxl_alloc_ring ( intelxl, &intelxl->rx ) ) != 0 )
goto err_alloc_rx;
/* Configure queues */
@@ -531,9 +531,9 @@ static int intelxlvf_open ( struct net_device *netdev ) {
err_enable:
err_irq_map:
err_configure:
- intelxl_free_ring ( intelxl, &intelxl->rx.ring );
+ intelxl_free_ring ( intelxl, &intelxl->rx );
err_alloc_rx:
- intelxl_free_ring ( intelxl, &intelxl->tx.ring );
+ intelxl_free_ring ( intelxl, &intelxl->tx );
err_alloc_tx:
return rc;
}
@@ -554,13 +554,13 @@ static void intelxlvf_close ( struct net_device *netdev ) {
}
/* Free receive descriptor ring */
- intelxl_free_ring ( intelxl, &intelxl->rx.ring );
+ intelxl_free_ring ( intelxl, &intelxl->rx );
/* Free transmit descriptor ring */
- intelxl_free_ring ( intelxl, &intelxl->tx.ring );
+ intelxl_free_ring ( intelxl, &intelxl->tx );
- /* Flush unused buffers */
- intelxl_flush ( intelxl );
+ /* Discard any unused receive buffers */
+ intelxl_empty_rx ( intelxl );
}
/** Network device operations */
@@ -605,11 +605,11 @@ static int intelxlvf_probe ( struct pci_device *pci ) {
&intelxlvf_admin_command_offsets );
intelxl_init_admin ( &intelxl->event, INTELXLVF_ADMIN,
&intelxlvf_admin_event_offsets );
- intelxlvf_init_ring ( &intelxl->tx.ring, INTELXL_TX_NUM_DESC,
- sizeof ( intelxl->tx.ring.desc.tx[0] ),
+ intelxlvf_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
+ sizeof ( intelxl->tx.desc.tx[0] ),
INTELXLVF_QTX_TAIL );
- intelxlvf_init_ring ( &intelxl->rx.ring, INTELXL_RX_NUM_DESC,
- sizeof ( intelxl->rx.ring.desc.rx[0] ),
+ intelxlvf_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
+ sizeof ( intelxl->rx.desc.rx[0] ),
INTELXLVF_QRX_TAIL );
/* Fix up PCI device */
@@ -625,6 +625,7 @@ static int intelxlvf_probe ( struct pci_device *pci ) {
/* Configure DMA */
intelxl->dma = &pci->dma;
dma_set_mask_64bit ( intelxl->dma );
+ netdev->dma = intelxl->dma;
/* Locate PCI Express capability */
intelxl->exp = pci_find_capability ( pci, PCI_CAP_ID_EXP );
diff --git a/src/drivers/net/intelxvf.c b/src/drivers/net/intelxvf.c
index a650979ef..f0ba091d5 100644
--- a/src/drivers/net/intelxvf.c
+++ b/src/drivers/net/intelxvf.c
@@ -276,11 +276,11 @@ static int intelxvf_open ( struct net_device *netdev ) {
}
/* Create transmit descriptor ring */
- if ( ( rc = intel_create_ring ( intel, &intel->tx.ring ) ) != 0 )
+ if ( ( rc = intel_create_ring ( intel, &intel->tx ) ) != 0 )
goto err_create_tx;
/* Create receive descriptor ring */
- if ( ( rc = intel_create_ring ( intel, &intel->rx.ring ) ) != 0 )
+ if ( ( rc = intel_create_ring ( intel, &intel->rx ) ) != 0 )
goto err_create_rx;
/* Allocate interrupt vectors */
@@ -317,9 +317,9 @@ static int intelxvf_open ( struct net_device *netdev ) {
return 0;
- intel_destroy_ring ( intel, &intel->rx.ring );
+ intel_destroy_ring ( intel, &intel->rx );
err_create_rx:
- intel_destroy_ring ( intel, &intel->tx.ring );
+ intel_destroy_ring ( intel, &intel->tx );
err_create_tx:
err_mbox_set_mtu:
err_mbox_set_mac:
@@ -337,13 +337,13 @@ static void intelxvf_close ( struct net_device *netdev ) {
struct intel_nic *intel = netdev->priv;
/* Destroy receive descriptor ring */
- intel_destroy_ring ( intel, &intel->rx.ring );
+ intel_destroy_ring ( intel, &intel->rx );
- /* Flush unused buffers */
- intel_flush ( intel );
+ /* Discard any unused receive buffers */
+ intel_empty_rx ( intel );
/* Destroy transmit descriptor ring */
- intel_destroy_ring ( intel, &intel->tx.ring );
+ intel_destroy_ring ( intel, &intel->tx );
/* Reset the function */
intelxvf_reset ( intel );
@@ -447,9 +447,9 @@ static int intelxvf_probe ( struct pci_device *pci ) {
netdev->dev = &pci->dev;
memset ( intel, 0, sizeof ( *intel ) );
intel_init_mbox ( &intel->mbox, INTELXVF_MBCTRL, INTELXVF_MBMEM );
- intel_init_ring ( &intel->tx.ring, INTEL_NUM_TX_DESC, INTELXVF_TD(0),
+ intel_init_ring ( &intel->tx, INTEL_NUM_TX_DESC, INTELXVF_TD(0),
intel_describe_tx_adv );
- intel_init_ring ( &intel->rx.ring, INTEL_NUM_RX_DESC, INTELXVF_RD(0),
+ intel_init_ring ( &intel->rx, INTEL_NUM_RX_DESC, INTELXVF_RD(0),
intel_describe_rx );
/* Fix up PCI device */
@@ -465,6 +465,7 @@ static int intelxvf_probe ( struct pci_device *pci ) {
/* Configure DMA */
intel->dma = &pci->dma;
dma_set_mask_64bit ( intel->dma );
+ netdev->dma = intel->dma;
/* Reset the function */
intelxvf_reset ( intel );
diff --git a/src/drivers/net/realtek.c b/src/drivers/net/realtek.c
index 47d435f72..0af3416d5 100644
--- a/src/drivers/net/realtek.c
+++ b/src/drivers/net/realtek.c
@@ -621,7 +621,6 @@ static void realtek_destroy_ring ( struct realtek_nic *rtl,
static void realtek_refill_rx ( struct realtek_nic *rtl ) {
struct realtek_descriptor *rx;
struct io_buffer *iobuf;
- struct dma_mapping *map;
unsigned int rx_idx;
int is_last;
@@ -629,34 +628,32 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) {
if ( rtl->legacy )
return;
- while ( ( rtl->rx.ring.prod - rtl->rx.ring.cons ) < RTL_NUM_RX_DESC ) {
-
- /* Get next receive descriptor */
- rx_idx = ( rtl->rx.ring.prod % RTL_NUM_RX_DESC );
- is_last = ( rx_idx == ( RTL_NUM_RX_DESC - 1 ) );
- rx = &rtl->rx.ring.desc[rx_idx];
- map = &rtl->rx.map[rx_idx];
- assert ( rtl->rx.iobuf[rx_idx] == NULL );
+ while ( ( rtl->rx.prod - rtl->rx.cons ) < RTL_NUM_RX_DESC ) {
/* Allocate I/O buffer */
- iobuf = dma_alloc_rx_iob ( rtl->dma, map, RTL_RX_MAX_LEN );
+ iobuf = alloc_rx_iob ( RTL_RX_MAX_LEN, rtl->dma );
if ( ! iobuf ) {
/* Wait for next refill */
return;
}
- rtl->rx.iobuf[rx_idx] = iobuf;
- /* Update producer index */
- rtl->rx.ring.prod++;
+ /* Get next receive descriptor */
+ rx_idx = ( rtl->rx.prod++ % RTL_NUM_RX_DESC );
+ is_last = ( rx_idx == ( RTL_NUM_RX_DESC - 1 ) );
+ rx = &rtl->rx.desc[rx_idx];
/* Populate receive descriptor */
- rx->address = cpu_to_le64 ( dma ( map, iobuf->data ) );
+ rx->address = cpu_to_le64 ( iob_dma ( iobuf ) );
rx->length = cpu_to_le16 ( RTL_RX_MAX_LEN );
wmb();
rx->flags = ( cpu_to_le16 ( RTL_DESC_OWN ) |
( is_last ? cpu_to_le16 ( RTL_DESC_EOR ) : 0 ) );
wmb();
+ /* Record I/O buffer */
+ assert ( rtl->rx_iobuf[rx_idx] == NULL );
+ rtl->rx_iobuf[rx_idx] = iobuf;
+
DBGC2 ( rtl, "REALTEK %p RX %d is [%lx,%lx)\n",
rtl, rx_idx, virt_to_phys ( iobuf->data ),
( virt_to_phys ( iobuf->data ) + RTL_RX_MAX_LEN ) );
@@ -676,11 +673,11 @@ static int realtek_open ( struct net_device *netdev ) {
int rc;
/* Create transmit descriptor ring */
- if ( ( rc = realtek_create_ring ( rtl, &rtl->tx.ring ) ) != 0 )
+ if ( ( rc = realtek_create_ring ( rtl, &rtl->tx ) ) != 0 )
goto err_create_tx;
/* Create receive descriptor ring */
- if ( ( rc = realtek_create_ring ( rtl, &rtl->rx.ring ) ) != 0 )
+ if ( ( rc = realtek_create_ring ( rtl, &rtl->rx ) ) != 0 )
goto err_create_rx;
/* Create receive buffer */
@@ -721,9 +718,9 @@ static int realtek_open ( struct net_device *netdev ) {
realtek_destroy_buffer ( rtl );
err_create_buffer:
- realtek_destroy_ring ( rtl, &rtl->rx.ring );
+ realtek_destroy_ring ( rtl, &rtl->rx );
err_create_rx:
- realtek_destroy_ring ( rtl, &rtl->tx.ring );
+ realtek_destroy_ring ( rtl, &rtl->tx );
err_create_tx:
return rc;
}
@@ -744,23 +741,17 @@ static void realtek_close ( struct net_device *netdev ) {
realtek_destroy_buffer ( rtl );
/* Destroy receive descriptor ring */
- realtek_destroy_ring ( rtl, &rtl->rx.ring );
+ realtek_destroy_ring ( rtl, &rtl->rx );
/* Discard any unused receive buffers */
for ( i = 0 ; i < RTL_NUM_RX_DESC ; i++ ) {
- if ( rtl->rx.iobuf[i] ) {
- dma_unmap ( &rtl->rx.map[i] );
- free_iob ( rtl->rx.iobuf[i] );
- }
- rtl->rx.iobuf[i] = NULL;
+ if ( rtl->rx_iobuf[i] )
+ free_rx_iob ( rtl->rx_iobuf[i] );
+ rtl->rx_iobuf[i] = NULL;
}
- /* Unmap any incomplete transmit buffers */
- for ( i = rtl->tx.ring.cons ; i != rtl->tx.ring.prod ; i++ )
- dma_unmap ( &rtl->tx.map[ i % RTL_NUM_TX_DESC ] );
-
/* Destroy transmit descriptor ring */
- realtek_destroy_ring ( rtl, &rtl->tx.ring );
+ realtek_destroy_ring ( rtl, &rtl->tx );
/* Reset legacy transmit descriptor index, if applicable */
if ( rtl->legacy )
@@ -778,37 +769,33 @@ static int realtek_transmit ( struct net_device *netdev,
struct io_buffer *iobuf ) {
struct realtek_nic *rtl = netdev->priv;
struct realtek_descriptor *tx;
- struct dma_mapping *map;
unsigned int tx_idx;
- physaddr_t address;
int is_last;
int rc;
/* Get next transmit descriptor */
- if ( ( rtl->tx.ring.prod - rtl->tx.ring.cons ) >= RTL_NUM_TX_DESC ) {
+ if ( ( rtl->tx.prod - rtl->tx.cons ) >= RTL_NUM_TX_DESC ) {
netdev_tx_defer ( netdev, iobuf );
return 0;
}
- tx_idx = ( rtl->tx.ring.prod % RTL_NUM_TX_DESC );
- map = &rtl->tx.map[tx_idx];
+ tx_idx = ( rtl->tx.prod % RTL_NUM_TX_DESC );
/* Pad and align packet, if needed */
if ( rtl->legacy )
iob_pad ( iobuf, ETH_ZLEN );
/* Map I/O buffer */
- if ( ( rc = dma_map_tx_iob ( rtl->dma, map, iobuf ) ) != 0 )
+ if ( ( rc = iob_map_tx ( iobuf, rtl->dma ) ) != 0 )
return rc;
- address = dma ( map, iobuf->data );
/* Update producer index */
- rtl->tx.ring.prod++;
+ rtl->tx.prod++;
/* Transmit packet */
if ( rtl->legacy ) {
/* Add to transmit ring */
- writel ( address, rtl->regs + RTL_TSAD ( tx_idx ) );
+ writel ( iob_dma ( iobuf ), rtl->regs + RTL_TSAD ( tx_idx ) );
writel ( ( RTL_TSD_ERTXTH_DEFAULT | iob_len ( iobuf ) ),
rtl->regs + RTL_TSD ( tx_idx ) );
@@ -816,8 +803,8 @@ static int realtek_transmit ( struct net_device *netdev,
/* Populate transmit descriptor */
is_last = ( tx_idx == ( RTL_NUM_TX_DESC - 1 ) );
- tx = &rtl->tx.ring.desc[tx_idx];
- tx->address = cpu_to_le64 ( address );
+ tx = &rtl->tx.desc[tx_idx];
+ tx->address = cpu_to_le64 ( iob_dma ( iobuf ) );
tx->length = cpu_to_le16 ( iob_len ( iobuf ) );
wmb();
tx->flags = ( cpu_to_le16 ( RTL_DESC_OWN | RTL_DESC_FS |
@@ -847,10 +834,10 @@ static void realtek_poll_tx ( struct net_device *netdev ) {
unsigned int tx_idx;
/* Check for completed packets */
- while ( rtl->tx.ring.cons != rtl->tx.ring.prod ) {
+ while ( rtl->tx.cons != rtl->tx.prod ) {
/* Get next transmit descriptor */
- tx_idx = ( rtl->tx.ring.cons % RTL_NUM_TX_DESC );
+ tx_idx = ( rtl->tx.cons % RTL_NUM_TX_DESC );
/* Stop if descriptor is still in use */
if ( rtl->legacy ) {
@@ -863,18 +850,15 @@ static void realtek_poll_tx ( struct net_device *netdev ) {
} else {
/* Check ownership bit in descriptor */
- tx = &rtl->tx.ring.desc[tx_idx];
+ tx = &rtl->tx.desc[tx_idx];
if ( tx->flags & cpu_to_le16 ( RTL_DESC_OWN ) )
return;
}
DBGC2 ( rtl, "REALTEK %p TX %d complete\n", rtl, tx_idx );
- /* Unmap I/O buffer */
- dma_unmap ( &rtl->tx.map[tx_idx] );
-
/* Complete TX descriptor */
- rtl->tx.ring.cons++;
+ rtl->tx.cons++;
netdev_tx_complete_next ( netdev );
}
}
@@ -954,22 +938,19 @@ static void realtek_poll_rx ( struct net_device *netdev ) {
}
/* Check for received packets */
- while ( rtl->rx.ring.cons != rtl->rx.ring.prod ) {
+ while ( rtl->rx.cons != rtl->rx.prod ) {
/* Get next receive descriptor */
- rx_idx = ( rtl->rx.ring.cons % RTL_NUM_RX_DESC );
- rx = &rtl->rx.ring.desc[rx_idx];
+ rx_idx = ( rtl->rx.cons % RTL_NUM_RX_DESC );
+ rx = &rtl->rx.desc[rx_idx];
/* Stop if descriptor is still in use */
if ( rx->flags & cpu_to_le16 ( RTL_DESC_OWN ) )
return;
- /* Unmap buffer */
- dma_unmap ( &rtl->rx.map[rx_idx] );
-
/* Populate I/O buffer */
- iobuf = rtl->rx.iobuf[rx_idx];
- rtl->rx.iobuf[rx_idx] = NULL;
+ iobuf = rtl->rx_iobuf[rx_idx];
+ rtl->rx_iobuf[rx_idx] = NULL;
len = ( le16_to_cpu ( rx->length ) & RTL_DESC_SIZE_MASK );
iob_put ( iobuf, ( len - 4 /* strip CRC */ ) );
@@ -984,7 +965,7 @@ static void realtek_poll_rx ( struct net_device *netdev ) {
"%zd)\n", rtl, rx_idx, len );
netdev_rx ( netdev, iobuf );
}
- rtl->rx.ring.cons++;
+ rtl->rx.cons++;
}
}
@@ -1128,9 +1109,8 @@ static int realtek_probe ( struct pci_device *pci ) {
pci_set_drvdata ( pci, netdev );
netdev->dev = &pci->dev;
memset ( rtl, 0, sizeof ( *rtl ) );
- rtl->dma = &pci->dma;
- realtek_init_ring ( &rtl->tx.ring, RTL_NUM_TX_DESC, RTL_TNPDS );
- realtek_init_ring ( &rtl->rx.ring, RTL_NUM_RX_DESC, RTL_RDSAR );
+ realtek_init_ring ( &rtl->tx, RTL_NUM_TX_DESC, RTL_TNPDS );
+ realtek_init_ring ( &rtl->rx, RTL_NUM_RX_DESC, RTL_RDSAR );
/* Fix up PCI device */
adjust_pci_device ( pci );
@@ -1142,6 +1122,9 @@ static int realtek_probe ( struct pci_device *pci ) {
goto err_ioremap;
}
+ /* Configure DMA */
+ rtl->dma = &pci->dma;
+
/* Reset the NIC */
if ( ( rc = realtek_reset ( rtl ) ) != 0 )
goto err_reset;
diff --git a/src/drivers/net/realtek.h b/src/drivers/net/realtek.h
index c7cb7e422..d4642fd76 100644
--- a/src/drivers/net/realtek.h
+++ b/src/drivers/net/realtek.h
@@ -274,24 +274,6 @@ realtek_init_ring ( struct realtek_ring *ring, unsigned int count,
ring->reg = reg;
}
-/** Transmit ring */
-struct realtek_tx_ring {
- /** Descriptor ring */
- struct realtek_ring ring;
- /** DMA mappings */
- struct dma_mapping map[RTL_NUM_TX_DESC];
-};
-
-/** Receive ring */
-struct realtek_rx_ring {
- /** Descriptor ring */
- struct realtek_ring ring;
- /** I/O buffers */
- struct io_buffer *iobuf[RTL_NUM_RX_DESC];
- /** DMA mappings */
- struct dma_mapping map[RTL_NUM_RX_DESC];
-};
-
/** Receive buffer (legacy mode *) */
struct realtek_rx_buffer {
/** Buffer */
@@ -327,9 +309,11 @@ struct realtek_nic {
unsigned int tppoll;
/** Transmit descriptor ring */
- struct realtek_tx_ring tx;
+ struct realtek_ring tx;
/** Receive descriptor ring */
- struct realtek_rx_ring rx;
+ struct realtek_ring rx;
+ /** Receive I/O buffers */
+ struct io_buffer *rx_iobuf[RTL_NUM_RX_DESC];
/** Receive buffer (legacy mode) */
struct realtek_rx_buffer rxbuf;
};
diff --git a/src/include/ipxe/dma.h b/src/include/ipxe/dma.h
index 842c9d6ef..b3fa24e47 100644
--- a/src/include/ipxe/dma.h
+++ b/src/include/ipxe/dma.h
@@ -12,7 +12,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <stdint.h>
#include <ipxe/api.h>
#include <ipxe/io.h>
-#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <config/ioapi.h>
@@ -385,25 +384,4 @@ dma_set_mask_64bit ( struct dma_device *dma ) {
dma_set_mask ( dma, ~( ( physaddr_t ) 0 ) );
}
-/**
- * Map I/O buffer for transmitting data to device
- *
- * @v dma DMA device
- * @v map DMA mapping to fill in
- * @v iobuf I/O buffer
- * @ret rc Return status code
- */
-static inline __always_inline int
-dma_map_tx_iob ( struct dma_device *dma, struct dma_mapping *map,
- struct io_buffer *iobuf ) {
-
- /* Map I/O buffer */
- return dma_map ( dma, map, virt_to_phys ( iobuf->data ),
- iob_len ( iobuf ), DMA_TX );
-}
-
-extern struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma,
- struct dma_mapping *map,
- size_t len );
-
#endif /* _IPXE_DMA_H */
diff --git a/src/include/ipxe/iobuf.h b/src/include/ipxe/iobuf.h
index b40ade350..630a7753c 100644
--- a/src/include/ipxe/iobuf.h
+++ b/src/include/ipxe/iobuf.h
@@ -12,6 +12,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <stdint.h>
#include <assert.h>
#include <ipxe/list.h>
+#include <ipxe/dma.h>
/**
* Minimum I/O buffer length
@@ -38,6 +39,9 @@ struct io_buffer {
*/
struct list_head list;
+ /** DMA mapping */
+ struct dma_mapping map;
+
/** Start of the buffer */
void *head;
/** Start of data */
@@ -210,10 +214,61 @@ static inline void iob_populate ( struct io_buffer *iobuf,
(iobuf) = NULL; \
__iobuf; } )
+/**
+ * Map I/O buffer for transmit DMA
+ *
+ * @v iobuf I/O buffer
+ * @v dma DMA device
+ * @ret rc Return status code
+ */
+static inline __always_inline int iob_map_tx ( struct io_buffer *iobuf,
+ struct dma_device *dma ) {
+ return dma_map ( dma, &iobuf->map, virt_to_phys ( iobuf->data ),
+ iob_len ( iobuf ), DMA_TX );
+}
+
+/**
+ * Map empty I/O buffer for receive DMA
+ *
+ * @v iobuf I/O buffer
+ * @v dma DMA device
+ * @ret rc Return status code
+ */
+static inline __always_inline int iob_map_rx ( struct io_buffer *iobuf,
+ struct dma_device *dma ) {
+ assert ( iob_len ( iobuf ) == 0 );
+ return dma_map ( dma, &iobuf->map, virt_to_phys ( iobuf->data ),
+ iob_tailroom ( iobuf ), DMA_RX );
+}
+
+/**
+ * Get I/O buffer DMA address
+ *
+ * @v iobuf I/O buffer
+ * @ret addr DMA address
+ */
+static inline __always_inline physaddr_t iob_dma ( struct io_buffer *iobuf ) {
+ return dma ( &iobuf->map, iobuf->data );
+}
+
+/**
+ * Unmap I/O buffer for DMA
+ *
+ * @v iobuf I/O buffer
+ * @v dma DMA device
+ * @ret rc Return status code
+ */
+static inline __always_inline void iob_unmap ( struct io_buffer *iobuf ) {
+ dma_unmap ( &iobuf->map );
+}
+
extern struct io_buffer * __malloc alloc_iob_raw ( size_t len, size_t align,
size_t offset );
extern struct io_buffer * __malloc alloc_iob ( size_t len );
extern void free_iob ( struct io_buffer *iobuf );
+extern struct io_buffer * __malloc alloc_rx_iob ( size_t len,
+ struct dma_device *dma );
+extern void free_rx_iob ( struct io_buffer *iobuf );
extern void iob_pad ( struct io_buffer *iobuf, size_t min_len );
extern int iob_ensure_headroom ( struct io_buffer *iobuf, size_t len );
extern struct io_buffer * iob_concatenate ( struct list_head *list );
diff --git a/src/include/ipxe/netdevice.h b/src/include/ipxe/netdevice.h
index d498ab697..b9c651c71 100644
--- a/src/include/ipxe/netdevice.h
+++ b/src/include/ipxe/netdevice.h
@@ -246,6 +246,10 @@ struct net_device_operations {
*
* This method is guaranteed to be called only when the device
* is open.
+ *
+ * If the network device has an associated DMA device, then
+ * the I/O buffer will be automatically mapped for transmit
+ * DMA.
*/
int ( * transmit ) ( struct net_device *netdev,
struct io_buffer *iobuf );
@@ -358,6 +362,8 @@ struct net_device {
char name[NETDEV_NAME_LEN];
/** Underlying hardware device */
struct device *dev;
+ /** DMA device */
+ struct dma_device *dma;
/** Network device operations */
struct net_device_operations *op;
diff --git a/src/interface/efi/efi_pci.c b/src/interface/efi/efi_pci.c
index 7687ffb43..8c30c9514 100644
--- a/src/interface/efi/efi_pci.c
+++ b/src/interface/efi/efi_pci.c
@@ -335,6 +335,7 @@ static int efipci_dma_map ( struct dma_device *dma, struct dma_mapping *map,
int rc;
/* Sanity check */
+ assert ( map->dma == NULL );
assert ( map->offset == 0 );
assert ( map->token == NULL );
@@ -409,6 +410,7 @@ static void efipci_dma_unmap ( struct dma_device *dma,
pci_io->Unmap ( pci_io, map->token );
/* Clear mapping */
+ map->dma = NULL;
map->offset = 0;
map->token = NULL;
diff --git a/src/net/netdevice.c b/src/net/netdevice.c
index 3b02e64bd..f3feca26b 100644
--- a/src/net/netdevice.c
+++ b/src/net/netdevice.c
@@ -307,6 +307,12 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) {
if ( ( rc = inject_fault ( NETDEV_DISCARD_RATE ) ) != 0 )
goto err;
+ /* Map for DMA, if required */
+ if ( netdev->dma && ( ! dma_mapped ( &iobuf->map ) ) ) {
+ if ( ( rc = iob_map_tx ( iobuf, netdev->dma ) ) != 0 )
+ goto err;
+ }
+
/* Transmit packet */
if ( ( rc = netdev->op->transmit ( netdev, iobuf ) ) != 0 )
goto err;
@@ -340,6 +346,9 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) {
* Failure to do this will cause the retransmitted packet to be
* immediately redeferred (which will result in out-of-order
* transmissions and other nastiness).
+ *
+ * I/O buffers that have been mapped for DMA will remain mapped while
+ * present in the deferred transmit queue.
*/
void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) {
@@ -365,6 +374,9 @@ void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) {
*
* The packet is discarded and a TX error is recorded. This function
* takes ownership of the I/O buffer.
+ *
+ * The I/O buffer will be automatically unmapped for DMA, if
+ * applicable.
*/
void netdev_tx_err ( struct net_device *netdev,
struct io_buffer *iobuf, int rc ) {
@@ -379,6 +391,10 @@ void netdev_tx_err ( struct net_device *netdev,
netdev->name, iobuf, strerror ( rc ) );
}
+ /* Unmap I/O buffer, if required */
+ if ( dma_mapped ( &iobuf->map ) )
+ iob_unmap ( iobuf );
+
/* Discard packet */
free_iob ( iobuf );
}
@@ -466,6 +482,9 @@ static void netdev_tx_flush ( struct net_device *netdev ) {
*
* The packet is added to the network device's RX queue. This
* function takes ownership of the I/O buffer.
+ *
+ * The I/O buffer will be automatically unmapped for DMA, if
+ * applicable.
*/
void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) {
int rc;
@@ -479,6 +498,10 @@ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) {
return;
}
+ /* Unmap I/O buffer, if required */
+ if ( dma_mapped ( &iobuf->map ) )
+ iob_unmap ( iobuf );
+
/* Enqueue packet */
list_add_tail ( &iobuf->list, &netdev->rx_queue );
@@ -497,6 +520,9 @@ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) {
* takes ownership of the I/O buffer. @c iobuf may be NULL if, for
* example, the net device wishes to report an error due to being
* unable to allocate an I/O buffer.
+ *
+ * The I/O buffer will be automatically unmapped for DMA, if
+ * applicable.
*/
void netdev_rx_err ( struct net_device *netdev,
struct io_buffer *iobuf, int rc ) {
@@ -504,6 +530,10 @@ void netdev_rx_err ( struct net_device *netdev,
DBGC ( netdev, "NETDEV %s failed to receive %p: %s\n",
netdev->name, iobuf, strerror ( rc ) );
+ /* Unmap I/O buffer, if required */
+ if ( iobuf && dma_mapped ( &iobuf->map ) )
+ iob_unmap ( iobuf );
+
/* Discard packet */
free_iob ( iobuf );
@@ -1178,6 +1208,8 @@ static unsigned int net_discard ( void ) {
/* Discard first deferred packet */
list_del ( &iobuf->list );
+ if ( dma_mapped ( &iobuf->map ) )
+ iob_unmap ( iobuf );
free_iob ( iobuf );
/* Report discard */