diff options
author: Michael Brown <mcb30@ipxe.org> (2020-11-26 12:25:02 +0000)
committer: Michael Brown <mcb30@ipxe.org> (2020-11-28 20:26:28 +0000)
commit: 8d337ecdae3c6d555ea57996bc2280debd984a9c (patch)
tree: 9a572ccfe77432a735a0df76f2a91e5fe93cefcf /src/net
parent: 70e6e83243b77a3771756106871d6f945062a44b (diff)
download: ipxe-8d337ecdae3c6d555ea57996bc2280debd984a9c.tar.gz
[dma] Move I/O buffer DMA operations to iobuf.h
Include a potential DMA mapping within the definition of an I/O
buffer, and move all I/O buffer DMA mapping functions from dma.h to
iobuf.h. This avoids the need for drivers to maintain a separate list
of DMA mappings for each I/O buffer that they may handle.
Network device drivers typically do not keep track of transmit I/O
buffers, since the network device core already maintains a transmit
queue. Drivers will typically call netdev_tx_complete_next() to
complete a transmission without first obtaining the relevant I/O
buffer pointer (and will rely on the network device core automatically
cancelling any pending transmissions when the device is closed).
To allow this driver design approach to be retained, update the
netdev_tx_complete() family of functions to automatically perform the
DMA unmapping operation if required. For symmetry, also update the
netdev_rx() family of functions to behave the same way.
As a further convenience for drivers, allow the network device core to
automatically perform DMA mapping on the transmit datapath before
calling the driver's transmit() method. This avoids the need to
introduce a mapping error handling code path into the typically
error-free transmit methods.
With these changes, the modifications required to update a typical
network device driver to use the new DMA API are fairly minimal:
- Allocate and free descriptor rings and similar coherent structures
using dma_alloc()/dma_free() rather than malloc_phys()/free_phys()
- Allocate and free receive buffers using alloc_rx_iob()/free_rx_iob()
rather than alloc_iob()/free_iob()
- Calculate DMA addresses using dma() or iob_dma() rather than
virt_to_bus()
- Set a 64-bit DMA mask if needed using dma_set_mask_64bit() and
thereafter eliminate checks on DMA address ranges
- Either record the DMA device in netdev->dma, or call iob_map_tx() as
part of the transmit() method
- Ensure that debug messages use virt_to_phys() when displaying
"hardware" addresses
Signed-off-by: Michael Brown <mcb30@ipxe.org>
Diffstat (limited to 'src/net')
-rw-r--r-- | src/net/netdevice.c | 32 |
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/src/net/netdevice.c b/src/net/netdevice.c index 3b02e64bd..f3feca26b 100644 --- a/src/net/netdevice.c +++ b/src/net/netdevice.c @@ -307,6 +307,12 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) { if ( ( rc = inject_fault ( NETDEV_DISCARD_RATE ) ) != 0 ) goto err; + /* Map for DMA, if required */ + if ( netdev->dma && ( ! dma_mapped ( &iobuf->map ) ) ) { + if ( ( rc = iob_map_tx ( iobuf, netdev->dma ) ) != 0 ) + goto err; + } + /* Transmit packet */ if ( ( rc = netdev->op->transmit ( netdev, iobuf ) ) != 0 ) goto err; @@ -340,6 +346,9 @@ int netdev_tx ( struct net_device *netdev, struct io_buffer *iobuf ) { * Failure to do this will cause the retransmitted packet to be * immediately redeferred (which will result in out-of-order * transmissions and other nastiness). + * + * I/O buffers that have been mapped for DMA will remain mapped while + * present in the deferred transmit queue. */ void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) { @@ -365,6 +374,9 @@ void netdev_tx_defer ( struct net_device *netdev, struct io_buffer *iobuf ) { * * The packet is discarded and a TX error is recorded. This function * takes ownership of the I/O buffer. + * + * The I/O buffer will be automatically unmapped for DMA, if + * applicable. */ void netdev_tx_err ( struct net_device *netdev, struct io_buffer *iobuf, int rc ) { @@ -379,6 +391,10 @@ void netdev_tx_err ( struct net_device *netdev, netdev->name, iobuf, strerror ( rc ) ); } + /* Unmap I/O buffer, if required */ + if ( dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); + /* Discard packet */ free_iob ( iobuf ); } @@ -466,6 +482,9 @@ static void netdev_tx_flush ( struct net_device *netdev ) { * * The packet is added to the network device's RX queue. This * function takes ownership of the I/O buffer. + * + * The I/O buffer will be automatically unmapped for DMA, if + * applicable. 
*/ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) { int rc; @@ -479,6 +498,10 @@ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) { return; } + /* Unmap I/O buffer, if required */ + if ( dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); + /* Enqueue packet */ list_add_tail ( &iobuf->list, &netdev->rx_queue ); @@ -497,6 +520,9 @@ void netdev_rx ( struct net_device *netdev, struct io_buffer *iobuf ) { * takes ownership of the I/O buffer. @c iobuf may be NULL if, for * example, the net device wishes to report an error due to being * unable to allocate an I/O buffer. + * + * The I/O buffer will be automatically unmapped for DMA, if + * applicable. */ void netdev_rx_err ( struct net_device *netdev, struct io_buffer *iobuf, int rc ) { @@ -504,6 +530,10 @@ void netdev_rx_err ( struct net_device *netdev, DBGC ( netdev, "NETDEV %s failed to receive %p: %s\n", netdev->name, iobuf, strerror ( rc ) ); + /* Unmap I/O buffer, if required */ + if ( iobuf && dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); + /* Discard packet */ free_iob ( iobuf ); @@ -1178,6 +1208,8 @@ static unsigned int net_discard ( void ) { /* Discard first deferred packet */ list_del ( &iobuf->list ); + if ( dma_mapped ( &iobuf->map ) ) + iob_unmap ( iobuf ); free_iob ( iobuf ); /* Report discard */ |