From 00085f1efa387a8ce100e3734920f7639c80caa3 Mon Sep 17 00:00:00 2001
From: Krzysztof Kozlowski
Date: Wed, 3 Aug 2016 13:46:00 -0700
Subject: dma-mapping: use unsigned long for dma_attrs

The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer.  Thus the pointer can point to const data.
However, the attributes do not have to be a bitfield at all; a plain
unsigned long will do fine:

1. This is just simpler, both in terms of reading the code and setting
   attributes.  Instead of initializing local attributes on the stack
   and passing a pointer to them to dma_set_attr(), just set the bits.

2. It brings safety and const-correctness checking, because the
   attributes are passed by value.
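As an illustration (an editorial sketch, not part of the original patch;
dev, size, dma_handle and buf are hypothetical locals assumed to be in
scope), a caller that previously built attributes on the stack now just
passes the bits:

    /* Before this change: attributes lived in a struct dma_attrs
     * built on the stack and were passed around by pointer. */
    DEFINE_DMA_ATTRS(attrs);
    dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
    buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);

    /* After this change: attributes are bits in an unsigned long,
     * so callers simply OR them together, and implementations test
     * them with '&' instead of dma_get_attr(). */
    buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
                          DMA_ATTR_WRITE_COMBINE);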
Semantic patches for this change (at least most of them):

    virtual patch
    virtual context

    @r@
    identifier f, attrs;
    @@
    f(...,
    - struct dma_attrs *attrs
    + unsigned long attrs
    , ...)
    {
    ...
    }

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
    )

and

    // Options: --all-includes
    virtual patch
    virtual context

    @r@
    identifier f, attrs;
    type t;
    @@
    t f(..., struct dma_attrs *attrs);

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
    )

Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski
Acked-by: Vineet Gupta
Acked-by: Robin Murphy
Acked-by: Hans-Christian Noren Egtvedt
Acked-by: Mark Salter [c6x]
Acked-by: Jesper Nilsson [cris]
Acked-by: Daniel Vetter [drm]
Reviewed-by: Bart Van Assche
Acked-by: Joerg Roedel [iommu]
Acked-by: Fabien Dessenne [bdisp]
Reviewed-by: Marek Szyprowski [vb2-core]
Acked-by: David Vrabel [xen]
Acked-by: Konrad Rzeszutek Wilk [xen swiotlb]
Acked-by: Joerg Roedel [iommu]
Acked-by: Richard Kuo [hexagon]
Acked-by: Geert Uytterhoeven [m68k]
Acked-by: Gerald Schaefer [s390]
Acked-by: Bjorn Andersson
Acked-by: Hans-Christian Noren Egtvedt [avr32]
Acked-by: Vineet Gupta [arc]
Acked-by: Robin Murphy [arm64 and dma-iommu]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm64/mm/dma-mapping.c | 66 ++++++++++++++++++++++----------------------
 1 file changed, 33 insertions(+), 33 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index f6c55afab3e2..c4284c432ae8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -32,10 +32,10 @@
 
 static int swiotlb __read_mostly;
 
-static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
 {
-	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
 		return pgprot_writecombine(prot);
 	return prot;
 }
@@ -91,7 +91,7 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
-				  struct dma_attrs *attrs)
+				  unsigned long attrs)
 {
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
@@ -121,7 +121,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 
 static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
-				struct dma_attrs *attrs)
+				unsigned long attrs)
 {
 	bool freed;
 	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
@@ -140,7 +140,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 
 static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
-			 struct dma_attrs *attrs)
+			 unsigned long attrs)
 {
 	struct page *page;
 	void *ptr, *coherent_ptr;
@@ -188,7 +188,7 @@ no_mem:
 
 static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
-		       struct dma_attrs *attrs)
+		       unsigned long attrs)
 {
 	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
@@ -205,7 +205,7 @@ static void __dma_free(struct device *dev, size_t size,
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
-				     struct dma_attrs *attrs)
+				     unsigned long attrs)
 {
 	dma_addr_t dev_addr;
 
@@ -219,7 +219,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 
 static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
-				 struct dma_attrs *attrs)
+				 unsigned long attrs)
 {
 	if (!is_device_dma_coherent(dev))
 		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
@@ -228,7 +228,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
 
 static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
-				  struct dma_attrs *attrs)
+				  unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i, ret;
@@ -245,7 +245,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
-				     struct dma_attrs *attrs)
+				     unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -306,7 +306,7 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  struct dma_attrs *attrs)
+			  unsigned long attrs)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -333,7 +333,7 @@ static int __swiotlb_mmap(struct device *dev,
 
 static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
-				 struct dma_attrs *attrs)
+				 unsigned long attrs)
 {
 	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 
@@ -435,21 +435,21 @@ out:
 
 static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
-			   struct dma_attrs *attrs)
+			   unsigned long attrs)
 {
 	return NULL;
 }
 
 static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
-			 struct dma_attrs *attrs)
+			 unsigned long attrs)
 {
 }
 
 static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			struct dma_attrs *attrs)
+			unsigned long attrs)
 {
 	return -ENXIO;
 }
@@ -457,20 +457,20 @@ static int __dummy_mmap(struct device *dev,
 static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
-				   struct dma_attrs *attrs)
+				   unsigned long attrs)
 {
 	return DMA_ERROR_CODE;
 }
 
 static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
-			       struct dma_attrs *attrs)
+			       unsigned long attrs)
 {
 }
 
 static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
-			  struct dma_attrs *attrs)
+			  unsigned long attrs)
 {
 	return 0;
 }
@@ -478,7 +478,7 @@ static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 
 static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
-			     struct dma_attrs *attrs)
+			     unsigned long attrs)
 {
 }
@@ -553,7 +553,7 @@ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
 
 static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
-				 struct dma_attrs *attrs)
+				 unsigned long attrs)
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
@@ -613,7 +613,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 }
 
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-			       dma_addr_t handle, struct dma_attrs *attrs)
+			       dma_addr_t handle, unsigned long attrs)
 {
 	size_t iosize = size;
 
@@ -629,7 +629,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
@@ -639,14 +639,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
 
 static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			      struct dma_attrs *attrs)
+			      unsigned long attrs)
 {
 	struct vm_struct *area;
 	int ret;
@@ -666,7 +666,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 
 static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
-			       size_t size, struct dma_attrs *attrs)
+			       size_t size, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area = find_vm_area(cpu_addr);
@@ -707,14 +707,14 @@ static void __iommu_sync_single_for_device(struct device *dev,
 static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
-				   struct dma_attrs *attrs)
+				   unsigned long attrs)
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int prot = dma_direction_to_prot(dir, coherent);
 	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
 	if (!iommu_dma_mapping_error(dev, dev_addr) &&
-	    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__iommu_sync_single_for_device(dev, dev_addr, size, dir);
 
 	return dev_addr;
@@ -722,9 +722,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
 
 static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
-			       struct dma_attrs *attrs)
+			       unsigned long attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
 
 	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
@@ -760,11 +760,11 @@ static void __iommu_sync_sg_for_device(struct device *dev,
 
 static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
-				struct dma_attrs *attrs)
+				unsigned long attrs)
 {
 	bool coherent = is_device_dma_coherent(dev);
 
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
 
 	return iommu_dma_map_sg(dev, sgl, nelems,
@@ -774,9 +774,9 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
-				   struct dma_attrs *attrs)
+				   unsigned long attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
 
 	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
-- 
cgit