diff options
author | Kevin O'Connor <kevin@koconnor.net> | 2022-01-19 14:08:05 -0500 |
---|---|---|
committer | Kevin O'Connor <kevin@koconnor.net> | 2022-01-27 11:26:11 -0500 |
commit | 6d462830e7cf9ce7689986327be3a317f1f990da (patch) | |
tree | ba4d59fafbf6ee2e6debe1c2c8b4c1a86d799e43 /src/hw/nvme.c | |
parent | f13b650015eba78725ceaf992b5ad6858fd77769 (diff) | |
download | seabios-6d462830e7cf9ce7689986327be3a317f1f990da.tar.gz |
nvme: Only allocate one dma bounce buffer for all nvme drives
There is no need to create multiple dma bounce buffers, as the BIOS
disk code isn't reentrant-capable.
Also, verify that the allocation succeeds.
Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
Reviewed-by: Alexander Graf <graf@amazon.com>
Diffstat (limited to 'src/hw/nvme.c')
-rw-r--r-- | src/hw/nvme.c | 21 |
1 file changed, 15 insertions, 6 deletions
diff --git a/src/hw/nvme.c b/src/hw/nvme.c
index e9c449d6..3dfa0cec 100644
--- a/src/hw/nvme.c
+++ b/src/hw/nvme.c
@@ -20,6 +20,9 @@
 #include "nvme.h"
 #include "nvme-int.h"
 
+// Page aligned "dma bounce buffer" of size NVME_PAGE_SIZE in high memory
+static void *nvme_dma_buffer;
+
 static void *
 zalloc_page_aligned(struct zone_s *zone, u32 size)
 {
@@ -257,6 +260,14 @@ nvme_probe_ns(struct nvme_ctrl *ctrl, u32 ns_idx, u8 mdts)
         goto free_buffer;
     }
 
+    if (!nvme_dma_buffer) {
+        nvme_dma_buffer = zalloc_page_aligned(&ZoneHigh, NVME_PAGE_SIZE);
+        if (!nvme_dma_buffer) {
+            warn_noalloc();
+            goto free_buffer;
+        }
+    }
+
     struct nvme_namespace *ns = malloc_fseg(sizeof(*ns));
     if (!ns) {
         warn_noalloc();
@@ -294,8 +305,6 @@ nvme_probe_ns(struct nvme_ctrl *ctrl, u32 ns_idx, u8 mdts)
         ns->max_req_size = -1U;
     }
 
-    ns->dma_buffer = zalloc_page_aligned(&ZoneHigh, NVME_PAGE_SIZE);
-
     char *desc = znprintf(MAXDESCSIZE, "NVMe NS %u: %llu MiB (%llu %u-byte "
                           "blocks + %u-byte metadata)",
                           ns_id, (ns->lba_count * ns->block_size) >> 20,
@@ -459,12 +468,12 @@ nvme_bounce_xfer(struct nvme_namespace *ns, u64 lba, void *buf, u16 count,
     u16 blocks = count < max_blocks ? count : max_blocks;
 
     if (write)
-        memcpy(ns->dma_buffer, buf, blocks * ns->block_size);
+        memcpy(nvme_dma_buffer, buf, blocks * ns->block_size);
 
-    int res = nvme_io_xfer(ns, lba, ns->dma_buffer, NULL, blocks, write);
+    int res = nvme_io_xfer(ns, lba, nvme_dma_buffer, NULL, blocks, write);
 
     if (!write && res >= 0)
-        memcpy(buf, ns->dma_buffer, res * ns->block_size);
+        memcpy(buf, nvme_dma_buffer, res * ns->block_size);
 
     return res;
 }
@@ -498,7 +507,7 @@ nvme_prpl_xfer(struct nvme_namespace *ns, u64 lba, void *buf, u16 count,
     /* Build PRP list if we need to describe more than 2 pages */
     if ((ns->block_size * count) > (NVME_PAGE_SIZE * 2)) {
         u32 prpl_len = 0;
-        u64 *prpl = (void*)ns->dma_buffer;
+        u64 *prpl = nvme_dma_buffer;
         int first_page = 1;
         for (; size > 0; base += NVME_PAGE_SIZE, size -= NVME_PAGE_SIZE) {
             if (first_page) {