diff options
author    Michael Brown <mcb30@ipxe.org>  2020-11-27 11:27:22 +0000
committer Michael Brown <mcb30@ipxe.org>  2020-11-28 18:56:50 +0000
commit    70e6e83243b77a3771756106871d6f945062a44b (patch)
tree      8ea5409f8b4992404dd3ff8c9a1f8334c6e9e226
parent    cf12a41703a8b2292e5d2d7528c2733c37869681 (diff)
download  ipxe-70e6e83243b77a3771756106871d6f945062a44b.tar.gz
[dma] Record DMA device as part of DMA mapping if needed
Allow for dma_unmap() to be called by code other than the DMA device
driver itself.
Signed-off-by: Michael Brown <mcb30@ipxe.org>
-rw-r--r--  src/core/dma.c              |  52
-rw-r--r--  src/drivers/net/intel.c     |  18
-rw-r--r--  src/drivers/net/intelxl.c   |  40
-rw-r--r--  src/drivers/net/realtek.c   |  23
-rw-r--r--  src/include/ipxe/dma.h      | 121
-rw-r--r--  src/interface/efi/efi_pci.c |  25
6 files changed, 150 insertions(+), 129 deletions(-)
diff --git a/src/core/dma.c b/src/core/dma.c index 3bf6957b9..561aec1e1 100644 --- a/src/core/dma.c +++ b/src/core/dma.c @@ -59,66 +59,65 @@ PROVIDE_DMAAPI_INLINE ( flat, dma_phys ); * Map buffer for DMA * * @v dma DMA device + * @v map DMA mapping to fill in * @v addr Buffer address * @v len Length of buffer * @v flags Mapping flags - * @v map DMA mapping to fill in * @ret rc Return status code */ -static int dma_op_map ( struct dma_device *dma, physaddr_t addr, size_t len, - int flags, struct dma_mapping *map ) { +static int dma_op_map ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, size_t len, int flags ) { struct dma_operations *op = dma->op; if ( ! op ) return -ENODEV; - return op->map ( dma, addr, len, flags, map ); + return op->map ( dma, map, addr, len, flags ); } /** * Unmap buffer * - * @v dma DMA device * @v map DMA mapping */ -static void dma_op_unmap ( struct dma_device *dma, struct dma_mapping *map ) { - struct dma_operations *op = dma->op; +static void dma_op_unmap ( struct dma_mapping *map ) { + struct dma_device *dma = map->dma; - assert ( op != NULL ); - op->unmap ( dma, map ); + assert ( dma != NULL ); + assert ( dma->op != NULL ); + dma->op->unmap ( dma, map ); } /** * Allocate and map DMA-coherent buffer * * @v dma DMA device + * @v map DMA mapping to fill in * @v len Length of buffer * @v align Physical alignment - * @v map DMA mapping to fill in * @ret addr Buffer address, or NULL on error */ -static void * dma_op_alloc ( struct dma_device *dma, size_t len, size_t align, - struct dma_mapping *map ) { +static void * dma_op_alloc ( struct dma_device *dma, struct dma_mapping *map, + size_t len, size_t align ) { struct dma_operations *op = dma->op; if ( ! 
op ) return NULL; - return op->alloc ( dma, len, align, map ); + return op->alloc ( dma, map, len, align ); } /** * Unmap and free DMA-coherent buffer * - * @v dma DMA device + * @v map DMA mapping * @v addr Buffer address * @v len Length of buffer - * @v map DMA mapping */ -static void dma_op_free ( struct dma_device *dma, void *addr, size_t len, - struct dma_mapping *map ) { - struct dma_operations *op = dma->op; +static void dma_op_free ( struct dma_mapping *map, void *addr, size_t len ) { + struct dma_device *dma = map->dma; - assert ( op != NULL ); - op->free ( dma, addr, len, map ); + assert ( dma != NULL ); + assert ( dma->op != NULL ); + dma->op->free ( dma, map, addr, len ); } /** @@ -152,12 +151,13 @@ PROVIDE_DMAAPI_INLINE ( op, dma_phys ); * Allocate and map I/O buffer for receiving data from device * * @v dma DMA device - * @v len Length of I/O buffer * @v map DMA mapping to fill in + * @v len Length of I/O buffer * @ret iobuf I/O buffer, or NULL on error */ -struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, size_t len, - struct dma_mapping *map ) { +struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, + struct dma_mapping *map, + size_t len ) { struct io_buffer *iobuf; int rc; @@ -167,13 +167,13 @@ struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, size_t len, goto err_alloc; /* Map I/O buffer */ - if ( ( rc = dma_map ( dma, virt_to_phys ( iobuf->data ), len, - DMA_RX, map ) ) != 0 ) + if ( ( rc = dma_map ( dma, map, virt_to_phys ( iobuf->data ), + len, DMA_RX ) ) != 0 ) goto err_map; return iobuf; - dma_unmap ( dma, map ); + dma_unmap ( map ); err_map: free_iob ( iobuf ); err_alloc: diff --git a/src/drivers/net/intel.c b/src/drivers/net/intel.c index 5c6dc2143..93c5fd1f6 100644 --- a/src/drivers/net/intel.c +++ b/src/drivers/net/intel.c @@ -504,8 +504,8 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) { * prevent any possible page-crossing errors due to hardware * errata. 
*/ - ring->desc = dma_alloc ( intel->dma, ring->len, ring->len, - &ring->map ); + ring->desc = dma_alloc ( intel->dma, &ring->map, ring->len, + ring->len ); if ( ! ring->desc ) return -ENOMEM; @@ -554,7 +554,7 @@ void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) { intel_reset_ring ( intel, ring->reg ); /* Free descriptor ring */ - dma_free ( intel->dma, ring->desc, ring->len, &ring->map ); + dma_free ( &ring->map, ring->desc, ring->len ); ring->desc = NULL; ring->prod = 0; ring->cons = 0; @@ -584,7 +584,7 @@ void intel_refill_rx ( struct intel_nic *intel ) { assert ( intel->rx.iobuf[rx_idx] == NULL ); /* Allocate I/O buffer */ - iobuf = dma_alloc_rx_iob ( intel->dma, INTEL_RX_MAX_LEN, map ); + iobuf = dma_alloc_rx_iob ( intel->dma, map, INTEL_RX_MAX_LEN ); if ( ! iobuf ) { /* Wait for next refill */ break; @@ -630,7 +630,7 @@ void intel_flush ( struct intel_nic *intel ) { /* Discard unused receive buffers */ for ( i = 0 ; i < INTEL_NUM_RX_DESC ; i++ ) { if ( intel->rx.iobuf[i] ) { - dma_unmap ( intel->dma, &intel->rx.map[i] ); + dma_unmap ( &intel->rx.map[i] ); free_iob ( intel->rx.iobuf[i] ); } intel->rx.iobuf[i] = NULL; @@ -639,7 +639,7 @@ void intel_flush ( struct intel_nic *intel ) { /* Unmap incomplete transmit buffers */ for ( i = intel->tx.ring.cons ; i != intel->tx.ring.prod ; i++ ) { tx_idx = ( i % INTEL_NUM_TX_DESC ); - dma_unmap ( intel->dma, &intel->tx.map[tx_idx] ); + dma_unmap ( &intel->tx.map[tx_idx] ); } } @@ -773,7 +773,7 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { map = &intel->tx.map[tx_idx]; /* Map I/O buffer */ - if ( ( rc = dma_map_tx_iob ( intel->dma, iobuf, map ) ) != 0 ) + if ( ( rc = dma_map_tx_iob ( intel->dma, map, iobuf ) ) != 0 ) return rc; /* Update producer index */ @@ -822,7 +822,7 @@ void intel_poll_tx ( struct net_device *netdev ) { DBGC2 ( intel, "INTEL %p TX %d complete\n", intel, tx_idx ); /* Unmap I/O buffer */ - dma_unmap ( intel->dma, &intel->tx.map[tx_idx] ); + 
dma_unmap ( &intel->tx.map[tx_idx] ); /* Complete TX descriptor */ netdev_tx_complete_next ( netdev ); @@ -854,7 +854,7 @@ void intel_poll_rx ( struct net_device *netdev ) { return; /* Unmap I/O buffer */ - dma_unmap ( intel->dma, &intel->rx.map[rx_idx] ); + dma_unmap ( &intel->rx.map[rx_idx] ); /* Populate I/O buffer */ iobuf = intel->rx.iobuf[rx_idx]; diff --git a/src/drivers/net/intelxl.c b/src/drivers/net/intelxl.c index 0808c784a..5de432a6a 100644 --- a/src/drivers/net/intelxl.c +++ b/src/drivers/net/intelxl.c @@ -136,9 +136,9 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl, int rc; /* Map dummy target location */ - if ( ( rc = dma_map ( intelxl->dma, virt_to_phys ( &intelxl->msix.msg ), - sizeof ( intelxl->msix.msg ), DMA_RX, - &intelxl->msix.map ) ) != 0 ) { + if ( ( rc = dma_map ( intelxl->dma, &intelxl->msix.map, + virt_to_phys ( &intelxl->msix.msg ), + sizeof ( intelxl->msix.msg ), DMA_RX ) ) != 0 ) { DBGC ( intelxl, "INTELXL %p could not map MSI-X target: %s\n", intelxl, strerror ( rc ) ); goto err_map; @@ -162,7 +162,7 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl, pci_msix_disable ( pci, &intelxl->msix.cap ); err_enable: - dma_unmap ( intelxl->dma, &intelxl->msix.map ); + dma_unmap ( &intelxl->msix.map ); err_map: return rc; } @@ -183,7 +183,7 @@ void intelxl_msix_disable ( struct intelxl_nic *intelxl, pci_msix_disable ( pci, &intelxl->msix.cap ); /* Unmap dummy target location */ - dma_unmap ( intelxl->dma, &intelxl->msix.map ); + dma_unmap ( &intelxl->msix.map ); } /****************************************************************************** @@ -215,8 +215,8 @@ static int intelxl_alloc_admin ( struct intelxl_nic *intelxl, size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC ); /* Allocate admin queue */ - admin->buf = dma_alloc ( intelxl->dma, ( buf_len + len ), - INTELXL_ALIGN, &admin->map ); + admin->buf = dma_alloc ( intelxl->dma, &admin->map, ( buf_len + len ), + INTELXL_ALIGN ); if ( ! 
admin->buf ) return -ENOMEM; admin->desc = ( ( ( void * ) admin->buf ) + buf_len ); @@ -291,13 +291,13 @@ static void intelxl_disable_admin ( struct intelxl_nic *intelxl, * @v intelxl Intel device * @v admin Admin queue */ -static void intelxl_free_admin ( struct intelxl_nic *intelxl, +static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused, struct intelxl_admin *admin ) { size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC ); size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC ); /* Free queue */ - dma_free ( intelxl->dma, admin->buf, ( buf_len + len ), &admin->map ); + dma_free ( &admin->map, admin->buf, ( buf_len + len ) ); } /** @@ -945,8 +945,8 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl, int rc; /* Allocate descriptor ring */ - ring->desc.raw = dma_alloc ( intelxl->dma, ring->len, INTELXL_ALIGN, - &ring->map ); + ring->desc.raw = dma_alloc ( intelxl->dma, &ring->map, ring->len, + INTELXL_ALIGN ); if ( ! ring->desc.raw ) { rc = -ENOMEM; goto err_alloc; @@ -969,7 +969,7 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl, return 0; - dma_free ( intelxl->dma, ring->desc.raw, ring->len, &ring->map ); + dma_free ( &ring->map, ring->desc.raw, ring->len ); err_alloc: return rc; } @@ -980,11 +980,11 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl, * @v intelxl Intel device * @v ring Descriptor ring */ -void intelxl_free_ring ( struct intelxl_nic *intelxl, +void intelxl_free_ring ( struct intelxl_nic *intelxl __unused, struct intelxl_ring *ring ) { /* Free descriptor ring */ - dma_free ( intelxl->dma, ring->desc.raw, ring->len, &ring->map ); + dma_free ( &ring->map, ring->desc.raw, ring->len ); ring->desc.raw = NULL; } @@ -1322,7 +1322,7 @@ static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) { assert ( intelxl->rx.iobuf[rx_idx] == NULL ); /* Allocate I/O buffer */ - iobuf = dma_alloc_rx_iob ( intelxl->dma, intelxl->mfs, map ); + iobuf = dma_alloc_rx_iob ( intelxl->dma, map, intelxl->mfs ); 
if ( ! iobuf ) { /* Wait for next refill */ break; @@ -1365,7 +1365,7 @@ void intelxl_flush ( struct intelxl_nic *intelxl ) { /* Discard any unused receive buffers */ for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) { if ( intelxl->rx.iobuf[i] ) { - dma_unmap ( intelxl->dma, &intelxl->rx.map[i] ); + dma_unmap ( &intelxl->rx.map[i] ); free_iob ( intelxl->rx.iobuf[i] ); } intelxl->rx.iobuf[i] = NULL; @@ -1374,7 +1374,7 @@ void intelxl_flush ( struct intelxl_nic *intelxl ) { /* Unmap incomplete transmit buffers */ for ( i = intelxl->tx.ring.cons ; i != intelxl->tx.ring.prod ; i++ ) { tx_idx = ( i % INTELXL_TX_NUM_DESC ); - dma_unmap ( intelxl->dma, &intelxl->tx.map[tx_idx] ); + dma_unmap ( &intelxl->tx.map[tx_idx] ); } } @@ -1516,7 +1516,7 @@ int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { map = &intelxl->tx.map[tx_idx]; /* Map I/O buffer */ - if ( ( rc = dma_map_tx_iob ( intelxl->dma, iobuf, map ) ) != 0 ) + if ( ( rc = dma_map_tx_iob ( intelxl->dma, map, iobuf ) ) != 0 ) return rc; /* Update producer index */ @@ -1564,7 +1564,7 @@ static void intelxl_poll_tx ( struct net_device *netdev ) { intelxl, tx_idx ); /* Unmap I/O buffer */ - dma_unmap ( intelxl->dma, &intelxl->tx.map[tx_idx] ); + dma_unmap ( &intelxl->tx.map[tx_idx] ); /* Complete TX descriptor */ netdev_tx_complete_next ( netdev ); @@ -1597,7 +1597,7 @@ static void intelxl_poll_rx ( struct net_device *netdev ) { return; /* Unmap I/O buffer */ - dma_unmap ( intelxl->dma, &intelxl->rx.map[rx_idx] ); + dma_unmap ( &intelxl->rx.map[rx_idx] ); /* Populate I/O buffer */ iobuf = intelxl->rx.iobuf[rx_idx]; diff --git a/src/drivers/net/realtek.c b/src/drivers/net/realtek.c index bca52266f..47d435f72 100644 --- a/src/drivers/net/realtek.c +++ b/src/drivers/net/realtek.c @@ -514,7 +514,8 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) { return 0; /* Allocate buffer */ - rxbuf->data = dma_alloc ( rtl->dma, len, RTL_RXBUF_ALIGN, &rxbuf->map ); + rxbuf->data = dma_alloc ( 
rtl->dma, &rxbuf->map, len, + RTL_RXBUF_ALIGN ); if ( ! rxbuf->data ) return -ENOMEM; @@ -545,7 +546,7 @@ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) { writel ( 0, rtl->regs + RTL_RBSTART ); /* Free buffer */ - dma_free ( rtl->dma, rxbuf->data, len, &rxbuf->map ); + dma_free ( &rxbuf->map, rxbuf->data, len ); rxbuf->data = NULL; rxbuf->offset = 0; } @@ -566,8 +567,8 @@ static int realtek_create_ring ( struct realtek_nic *rtl, return 0; /* Allocate descriptor ring */ - ring->desc = dma_alloc ( rtl->dma, ring->len, RTL_RING_ALIGN, - &ring->map ); + ring->desc = dma_alloc ( rtl->dma, &ring->map, ring->len, + RTL_RING_ALIGN ); if ( ! ring->desc ) return -ENOMEM; @@ -608,7 +609,7 @@ static void realtek_destroy_ring ( struct realtek_nic *rtl, writel ( 0, rtl->regs + ring->reg + 4 ); /* Free descriptor ring */ - dma_free ( rtl->dma, ring->desc, ring->len, &ring->map ); + dma_free ( &ring->map, ring->desc, ring->len ); ring->desc = NULL; } @@ -638,7 +639,7 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) { assert ( rtl->rx.iobuf[rx_idx] == NULL ); /* Allocate I/O buffer */ - iobuf = dma_alloc_rx_iob ( rtl->dma, RTL_RX_MAX_LEN, map ); + iobuf = dma_alloc_rx_iob ( rtl->dma, map, RTL_RX_MAX_LEN ); if ( ! 
iobuf ) { /* Wait for next refill */ return; @@ -748,7 +749,7 @@ static void realtek_close ( struct net_device *netdev ) { /* Discard any unused receive buffers */ for ( i = 0 ; i < RTL_NUM_RX_DESC ; i++ ) { if ( rtl->rx.iobuf[i] ) { - dma_unmap ( rtl->dma, &rtl->rx.map[i] ); + dma_unmap ( &rtl->rx.map[i] ); free_iob ( rtl->rx.iobuf[i] ); } rtl->rx.iobuf[i] = NULL; @@ -756,7 +757,7 @@ static void realtek_close ( struct net_device *netdev ) { /* Unmap any incomplete transmit buffers */ for ( i = rtl->tx.ring.cons ; i != rtl->tx.ring.prod ; i++ ) - dma_unmap ( rtl->dma, &rtl->tx.map[ i % RTL_NUM_TX_DESC ] ); + dma_unmap ( &rtl->tx.map[ i % RTL_NUM_TX_DESC ] ); /* Destroy transmit descriptor ring */ realtek_destroy_ring ( rtl, &rtl->tx.ring ); @@ -796,7 +797,7 @@ static int realtek_transmit ( struct net_device *netdev, iob_pad ( iobuf, ETH_ZLEN ); /* Map I/O buffer */ - if ( ( rc = dma_map_tx_iob ( rtl->dma, iobuf, map ) ) != 0 ) + if ( ( rc = dma_map_tx_iob ( rtl->dma, map, iobuf ) ) != 0 ) return rc; address = dma ( map, iobuf->data ); @@ -870,7 +871,7 @@ static void realtek_poll_tx ( struct net_device *netdev ) { DBGC2 ( rtl, "REALTEK %p TX %d complete\n", rtl, tx_idx ); /* Unmap I/O buffer */ - dma_unmap ( rtl->dma, &rtl->tx.map[tx_idx] ); + dma_unmap ( &rtl->tx.map[tx_idx] ); /* Complete TX descriptor */ rtl->tx.ring.cons++; @@ -964,7 +965,7 @@ static void realtek_poll_rx ( struct net_device *netdev ) { return; /* Unmap buffer */ - dma_unmap ( rtl->dma, &rtl->rx.map[rx_idx] ); + dma_unmap ( &rtl->rx.map[rx_idx] ); /* Populate I/O buffer */ iobuf = rtl->rx.iobuf[rx_idx]; diff --git a/src/include/ipxe/dma.h b/src/include/ipxe/dma.h index 0577493c7..842c9d6ef 100644 --- a/src/include/ipxe/dma.h +++ b/src/include/ipxe/dma.h @@ -37,6 +37,8 @@ struct dma_mapping { * device-side DMA address. 
*/ physaddr_t offset; + /** DMA device (if unmapping is required) */ + struct dma_device *dma; /** Platform mapping token */ void *token; }; @@ -59,14 +61,14 @@ struct dma_operations { * Map buffer for DMA * * @v dma DMA device + * @v map DMA mapping to fill in * @v addr Buffer address * @v len Length of buffer * @v flags Mapping flags - * @v map DMA mapping to fill in * @ret rc Return status code */ - int ( * map ) ( struct dma_device *dma, physaddr_t addr, size_t len, - int flags, struct dma_mapping *map ); + int ( * map ) ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, size_t len, int flags ); /** * Unmap buffer * @@ -78,23 +80,23 @@ struct dma_operations { * Allocate and map DMA-coherent buffer * * @v dma DMA device + * @v map DMA mapping to fill in * @v len Length of buffer * @v align Physical alignment - * @v map DMA mapping to fill in * @ret addr Buffer address, or NULL on error */ - void * ( * alloc ) ( struct dma_device *dma, size_t len, size_t align, - struct dma_mapping *map ); + void * ( * alloc ) ( struct dma_device *dma, struct dma_mapping *map, + size_t len, size_t align ); /** * Unmap and free DMA-coherent buffer * * @v dma DMA device + * @v map DMA mapping * @v addr Buffer address * @v len Length of buffer - * @v map DMA mapping */ - void ( * free ) ( struct dma_device *dma, void *addr, size_t len, - struct dma_mapping *map ); + void ( * free ) ( struct dma_device *dma, struct dma_mapping *map, + void *addr, size_t len ); /** * Set addressable space mask * @@ -146,21 +148,23 @@ struct dma_operations { * Map buffer for DMA * * @v dma DMA device + * @v map DMA mapping to fill in * @v addr Buffer address * @v len Length of buffer * @v flags Mapping flags - * @v map DMA mapping to fill in * @ret rc Return status code */ static inline __always_inline int DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma, + struct dma_mapping *map, physaddr_t addr __unused, - size_t len __unused, int flags __unused, - struct dma_mapping *map 
__unused ) { + size_t len __unused, int flags __unused ) { /* Increment mapping count (for debugging) */ - if ( DBG_LOG ) + if ( DBG_LOG ) { + map->dma = dma; dma->mapped++; + } return 0; } @@ -168,39 +172,42 @@ DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma, /** * Unmap buffer * - * @v dma DMA device * @v map DMA mapping */ static inline __always_inline void -DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_device *dma, - struct dma_mapping *map __unused ) { +DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_mapping *map ) { /* Decrement mapping count (for debugging) */ - if ( DBG_LOG ) - dma->mapped--; + if ( DBG_LOG ) { + assert ( map->dma != NULL ); + map->dma->mapped--; + map->dma = NULL; + } } /** * Allocate and map DMA-coherent buffer * * @v dma DMA device + * @v map DMA mapping to fill in * @v len Length of buffer * @v align Physical alignment - * @v map DMA mapping to fill in * @ret addr Buffer address, or NULL on error */ static inline __always_inline void * DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma, - size_t len, size_t align, - struct dma_mapping *map __unused ) { + struct dma_mapping *map, + size_t len, size_t align ) { void *addr; /* Allocate buffer */ addr = malloc_phys ( len, align ); - /* Increment allocation count (for debugging) */ - if ( DBG_LOG && addr ) - dma->allocated++; + /* Increment mapping count (for debugging) */ + if ( DBG_LOG && addr ) { + map->dma = dma; + dma->mapped++; + } return addr; } @@ -208,22 +215,23 @@ DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma, /** * Unmap and free DMA-coherent buffer * - * @v dma DMA device + * @v map DMA mapping * @v addr Buffer address * @v len Length of buffer - * @v map DMA mapping */ static inline __always_inline void -DMAAPI_INLINE ( flat, dma_free ) ( struct dma_device *dma, - void *addr, size_t len, - struct dma_mapping *map __unused ) { +DMAAPI_INLINE ( flat, dma_free ) ( struct dma_mapping *map, + void *addr, size_t len ) { /* Free buffer */ free_phys ( addr, 
len ); - /* Decrement allocation count (for debugging) */ - if ( DBG_LOG ) - dma->allocated--; + /* Decrement mapping count (for debugging) */ + if ( DBG_LOG ) { + assert ( map->dma != NULL ); + map->dma->mapped--; + map->dma = NULL; + } } /** @@ -272,45 +280,42 @@ DMAAPI_INLINE ( op, dma_phys ) ( struct dma_mapping *map, physaddr_t addr ) { * Map buffer for DMA * * @v dma DMA device + * @v map DMA mapping to fill in * @v addr Buffer address * @v len Length of buffer * @v flags Mapping flags - * @v map DMA mapping to fill in * @ret rc Return status code */ -int dma_map ( struct dma_device *dma, physaddr_t addr, size_t len, - int flags, struct dma_mapping *map ); +int dma_map ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, size_t len, int flags ); /** * Unmap buffer * - * @v dma DMA device * @v map DMA mapping */ -void dma_unmap ( struct dma_device *dma, struct dma_mapping *map ); +void dma_unmap ( struct dma_mapping *map ); /** * Allocate and map DMA-coherent buffer * * @v dma DMA device + * @v map DMA mapping to fill in * @v len Length of buffer * @v align Physical alignment - * @v map DMA mapping to fill in * @ret addr Buffer address, or NULL on error */ -void * dma_alloc ( struct dma_device *dma, size_t len, size_t align, - struct dma_mapping *map ); +void * dma_alloc ( struct dma_device *dma, struct dma_mapping *map, + size_t len, size_t align ); /** * Unmap and free DMA-coherent buffer * - * @v dma DMA device + * @v map DMA mapping * @v addr Buffer address * @v len Length of buffer - * @v map DMA mapping */ -void dma_free ( struct dma_device *dma, void *addr, size_t len, - struct dma_mapping *map ); +void dma_free ( struct dma_mapping *map, void *addr, size_t len ); /** * Set addressable space mask @@ -339,10 +344,23 @@ physaddr_t dma_phys ( struct dma_mapping *map, physaddr_t addr ); static inline __always_inline physaddr_t dma ( struct dma_mapping *map, void *addr ) { + /* Get DMA address from corresponding physical address */ return 
dma_phys ( map, virt_to_phys ( addr ) ); } /** + * Check if DMA unmapping is required + * + * @v map DMA mapping + * @v unmap Unmapping is required + */ +static inline __always_inline int dma_mapped ( struct dma_mapping *map ) { + + /* Unmapping is required if a DMA device was recorded */ + return ( map->dma != NULL ); +} + +/** * Initialise DMA device * * @v dma DMA device @@ -371,20 +389,21 @@ dma_set_mask_64bit ( struct dma_device *dma ) { * Map I/O buffer for transmitting data to device * * @v dma DMA device - * @v iobuf I/O buffer * @v map DMA mapping to fill in + * @v iobuf I/O buffer * @ret rc Return status code */ static inline __always_inline int -dma_map_tx_iob ( struct dma_device *dma, struct io_buffer *iobuf, - struct dma_mapping *map ) { +dma_map_tx_iob ( struct dma_device *dma, struct dma_mapping *map, + struct io_buffer *iobuf ) { /* Map I/O buffer */ - return dma_map ( dma, virt_to_phys ( iobuf->data ), iob_len ( iobuf ), - DMA_TX, map ); + return dma_map ( dma, map, virt_to_phys ( iobuf->data ), + iob_len ( iobuf ), DMA_TX ); } -extern struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, size_t len, - struct dma_mapping *map ); +extern struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, + struct dma_mapping *map, + size_t len ); #endif /* _IPXE_DMA_H */ diff --git a/src/interface/efi/efi_pci.c b/src/interface/efi/efi_pci.c index e33a8980d..7687ffb43 100644 --- a/src/interface/efi/efi_pci.c +++ b/src/interface/efi/efi_pci.c @@ -315,14 +315,14 @@ PROVIDE_PCIAPI ( efi, pci_ioremap, efipci_ioremap ); * Map buffer for DMA * * @v dma DMA device + * @v map DMA mapping to fill in * @v addr Buffer address * @v len Length of buffer * @v flags Mapping flags - * @v map DMA mapping to fill in * @ret rc Return status code */ -static int efipci_dma_map ( struct dma_device *dma, physaddr_t addr, size_t len, - int flags, struct dma_mapping *map ) { +static int efipci_dma_map ( struct dma_device *dma, struct dma_mapping *map, + physaddr_t addr, 
size_t len, int flags ) { struct efi_pci_device *efipci = container_of ( dma, struct efi_pci_device, pci.dma ); struct pci_device *pci = &efipci->pci; @@ -374,6 +374,7 @@ static int efipci_dma_map ( struct dma_device *dma, physaddr_t addr, size_t len, } /* Populate mapping */ + map->dma = dma; map->offset = ( bus - addr ); map->token = mapping; @@ -420,14 +421,14 @@ static void efipci_dma_unmap ( struct dma_device *dma, * Allocate and map DMA-coherent buffer * * @v dma DMA device + * @v map DMA mapping to fill in * @v len Length of buffer * @v align Physical alignment - * @v map DMA mapping to fill in * @ret addr Buffer address, or NULL on error */ -static void * efipci_dma_alloc ( struct dma_device *dma, size_t len, - size_t align __unused, - struct dma_mapping *map ) { +static void * efipci_dma_alloc ( struct dma_device *dma, + struct dma_mapping *map, + size_t len, size_t align __unused ) { struct efi_pci_device *efipci = container_of ( dma, struct efi_pci_device, pci.dma ); struct pci_device *pci = &efipci->pci; @@ -451,8 +452,8 @@ static void * efipci_dma_alloc ( struct dma_device *dma, size_t len, } /* Map buffer */ - if ( ( rc = efipci_dma_map ( dma, virt_to_phys ( addr ), len, DMA_BI, - map ) ) != 0 ) + if ( ( rc = efipci_dma_map ( dma, map, virt_to_phys ( addr ), + len, DMA_BI ) ) != 0 ) goto err_map; /* Increment allocation count (for debugging) */ @@ -472,12 +473,12 @@ static void * efipci_dma_alloc ( struct dma_device *dma, size_t len, * Unmap and free DMA-coherent buffer * * @v dma DMA device + * @v map DMA mapping * @v addr Buffer address * @v len Length of buffer - * @v map DMA mapping */ -static void efipci_dma_free ( struct dma_device *dma, void *addr, size_t len, - struct dma_mapping *map ) { +static void efipci_dma_free ( struct dma_device *dma, struct dma_mapping *map, + void *addr, size_t len ) { struct efi_pci_device *efipci = container_of ( dma, struct efi_pci_device, pci.dma ); EFI_PCI_IO_PROTOCOL *pci_io = efipci->io; |