With this we can do a runtime check on the allocation type, kernel or user, using the DMA attribute passed to the dma-mapping IOMMU ops: when DMA_ATTR_USER_SPACE is set, arm_iommu_alloc_attrs() skips the kernel remap in __iommu_alloc_remap() and instead returns a struct page_infodma describing the allocated pages, and arm_iommu_free_attrs() frees the buffer through that descriptor rather than through the consistent-mapping vmregion. The failure path of __iommu_create_mapping() is also fixed to unmap any partially created IOMMU mapping before releasing the IO virtual address range.
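For illustration, a driver would be expected to request a user-space allocation roughly as follows. This is a sketch only: DMA_ATTR_USER_SPACE and struct page_infodma come from this series, not mainline, and the helper name example_alloc is made up here.

	static int example_alloc(struct device *dev, size_t size)
	{
		struct page_infodma *info;
		dma_addr_t iova;
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_USER_SPACE, &attrs);

		/*
		 * With the attribute set, the returned "cpu address" is not
		 * a kernel mapping but the struct page_infodma descriptor.
		 */
		info = dma_alloc_attrs(dev, size, &iova, GFP_KERNEL, &attrs);
		if (!info)
			return -ENOMEM;

		/* ... map info->pages into user space, program iova into the device ... */

		dma_free_attrs(dev, size, info, iova, &attrs);
		return 0;
	}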
Signed-off-by: Abhinav <abhinav.k@samsung.com>
---
 arch/arm/mm/dma-mapping.c |   88 +++++++++++++++++++++++++++++++++++----------
 1 files changed, 69 insertions(+), 19 deletions(-)
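Note for reviewers: struct page_infodma is not defined in this patch, presumably it comes earlier in the series. The layout below is a sketch inferred from the fields this patch touches (nr_pages, pages), not the series' actual definition:

	/* Inferred sketch only */
	struct page_infodma {
		size_t		nr_pages;	/* set from size >> PAGE_SHIFT */
		struct page	**pages;	/* page array backing the buffer */
	};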
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 2c5a285..4cd46b4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -428,6 +428,7 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 	arm_vmregion_free(&consistent_head, c);
 }
 
+
 #else	/* !CONFIG_MMU */
 
 #define __dma_alloc_remap(page, size, gfp, prot, c)	page_address(page)
@@ -894,6 +895,35 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
 	size_t align;
 	size_t count = size >> PAGE_SHIFT;
 	int bit;
+	unsigned long mem_type = (unsigned long)gfp;
+
+
+	if (mem_type) {
+
+		struct page_infodma *pages_in;
+
+		pages_in = kzalloc(sizeof(*pages_in), GFP_KERNEL);
+		if (!pages_in)
+			return NULL;
+
+		pages_in->nr_pages = count;
+
+		return (void *)pages_in;
+
+	}
+
+	/*
+	 * Align the virtual region allocation - maximum alignment is
+	 * a section size, minimum is a page size. This helps reduce
+	 * fragmentation of the DMA space, and also prevents allocations
+	 * smaller than a section from crossing a section boundary.
+	 */
+
+	bit = fls(size - 1);
+	if (bit > SECTION_SHIFT)
+		bit = SECTION_SHIFT;
+	align = 1 << bit;
+
 
 	if (!consistent_pte[0]) {
 		pr_err("%s: not initialised\n", __func__);
@@ -901,16 +931,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
 		return NULL;
 	}
 
-	/*
-	 * Align the virtual region allocation - maximum alignment is
-	 * a section size, minimum is a page size. This helps reduce
-	 * fragmentation of the DMA space, and also prevents allocations
-	 * smaller than a section from crossing a section boundary.
-	 */
-	bit = fls(size - 1);
-	if (bit > SECTION_SHIFT)
-		bit = SECTION_SHIFT;
-	align = 1 << bit;
 
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
@@ -946,6 +966,7 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
 	return NULL;
 }
 
+
 /*
  * Create a mapping in device IO address space for specified pages
  */
@@ -973,13 +994,16 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 
 		len = (j - i) << PAGE_SHIFT;
 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+
 		if (ret < 0)
 			goto fail;
+
 		iova += len;
 		i = j;
 	}
 	return dma_addr;
 fail:
+	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
 	__free_iova(mapping, dma_addr, size);
 	return DMA_ERROR_CODE;
 }
@@ -1007,6 +1031,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
 	struct page **pages;
 	void *addr = NULL;
+	struct page_infodma *page_ret;
+	unsigned long mem_type;
 
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
@@ -1019,11 +1045,19 @@
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
 
-	addr = __iommu_alloc_remap(pages, size, gfp, prot);
+	mem_type = dma_get_attr(DMA_ATTR_USER_SPACE, attrs);
+
+	addr = __iommu_alloc_remap(pages, size, (gfp_t)mem_type, prot);
 	if (!addr)
 		goto err_mapping;
 
-	return addr;
+	if (mem_type) {
+		page_ret = (struct page_infodma *)addr;
+		page_ret->pages = pages;
+		return page_ret;
+	}
+
+	return addr;
 
 err_mapping:
 	__iommu_remove_mapping(dev, *handle, size);
@@ -1071,18 +1105,34 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			  dma_addr_t handle, struct dma_attrs *attrs)
 {
-	struct arm_vmregion *c;
+
+	unsigned long mem_type = dma_get_attr(DMA_ATTR_USER_SPACE, attrs);
+
 	size = PAGE_ALIGN(size);
 
-	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-	if (c) {
-		struct page **pages = c->priv;
-		__dma_free_remap(cpu_addr, size);
-		__iommu_remove_mapping(dev, handle, size);
-		__iommu_free_buffer(dev, pages, size);
+
+	if (mem_type) {
+
+		struct page_infodma *pagesin = cpu_addr;
+		if (pagesin) {
+			struct page **pages = pagesin->pages;
+			__iommu_remove_mapping(dev, handle, size);
+			__iommu_free_buffer(dev, pages, size);
+			kfree(pagesin);
+		}
+	} else {
+		struct arm_vmregion *c;
+		c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+		if (c) {
+			struct page **pages = c->priv;
+			__dma_free_remap(cpu_addr, size);
+			__iommu_remove_mapping(dev, handle, size);
+			__iommu_free_buffer(dev, pages, size);
+		}
 	}
 }
 
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
-- 
1.7.0.4