Hello!
This is a quick update on the patchset which replaces the custom consistent dma region usage in the dma-mapping framework in favour of generic vmalloc areas created on demand for each allocation. The main purpose of this patchset is to remove the 2MiB limit on dma coherent/writecombine allocations.
In this version the arch-independent VM_DMA flag has been replaced with an ARM-specific VM_ARM_DMA_CONSISTENT flag.
This patchset is based on the vanilla v3.5-rc4 release.
Best regards,
Marek Szyprowski
Samsung Poland R&D Center
Changelog:
v4:
- replaced arch-independent VM_DMA flag with ARM-specific VM_ARM_DMA_CONSISTENT flag

v3: http://thread.gmane.org/gmane.linux.kernel.mm/80028/
- rebased onto v3.4-rc2
- added support for IOMMU-aware implementation of dma-mapping calls, unified with the CMA coherent dma pool
- implemented changes requested by Minchan Kim: added more checks for vmarea->flags & VM_DMA, renamed some variables, removed obsolete locks, squashed the find_vm_area() exporting patch into the main redesign patch

v2: http://thread.gmane.org/gmane.linux.kernel.mm/78563
- added support for atomic allocations (served from a preallocated pool)
- minor cleanups here and there
- rebased onto v3.4-rc7

v1: http://thread.gmane.org/gmane.linux.kernel.mm/76703
- initial version
Patch summary:
Marek Szyprowski (2):
  mm: vmalloc: use const void * for caller argument
  ARM: dma-mapping: remove custom consistent dma region
 Documentation/kernel-parameters.txt |    2 +-
 arch/arm/include/asm/dma-mapping.h  |    2 +-
 arch/arm/mm/dma-mapping.c           |  505 +++++++++++++----------------
 arch/arm/mm/mm.h                    |    3 +
 include/linux/vmalloc.h             |    9 +-
 mm/vmalloc.c                        |   28 ++-
 6 files changed, 207 insertions(+), 342 deletions(-)
'const void *' is a safer type for the caller function argument. This patch updates all references to the caller function type.
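For illustration (a sketch, not part of the patch): the caller argument is typically a code address captured with __builtin_return_address(0), and it is only ever read back for /proc/vmallocinfo reporting, which is why const is the natural qualifier:

	struct vm_struct *area;

	/* record the allocation site; the pointer is never written through */
	area = get_vm_area_caller(PAGE_SIZE, VM_USERMAP,
				  __builtin_return_address(0));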
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/vmalloc.h |    8 ++++----
 mm/vmalloc.c            |   18 +++++++++---------
 2 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index dcdfc2b..2e28f4d 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -32,7 +32,7 @@ struct vm_struct {
 	struct page		**pages;
 	unsigned int		nr_pages;
 	phys_addr_t		phys_addr;
-	void			*caller;
+	const void		*caller;
 };
 
 /*
@@ -62,7 +62,7 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
-			pgprot_t prot, int node, void *caller);
+			pgprot_t prot, int node, const void *caller);
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -85,13 +85,13 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
 
 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *get_vm_area_caller(unsigned long size,
-					unsigned long flags, void *caller);
+					unsigned long flags, const void *caller);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
 extern struct vm_struct *__get_vm_area_caller(unsigned long size,
 					unsigned long flags,
 					unsigned long start, unsigned long end,
-					void *caller);
+					const void *caller);
 extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2aad499..11308f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1280,7 +1280,7 @@ DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-			      unsigned long flags, void *caller)
+			      unsigned long flags, const void *caller)
 {
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
@@ -1306,7 +1306,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
 }
 
 static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-			      unsigned long flags, void *caller)
+			      unsigned long flags, const void *caller)
 {
 	setup_vmalloc_vm(vm, va, flags, caller);
 	insert_vmalloc_vmlist(vm);
@@ -1314,7 +1314,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
 		unsigned long align, unsigned long flags, unsigned long start,
-		unsigned long end, int node, gfp_t gfp_mask, void *caller)
+		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
 {
 	struct vmap_area *va;
 	struct vm_struct *area;
@@ -1375,7 +1375,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
 
 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
-				       void *caller)
+				       const void *caller)
 {
 	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  caller);
@@ -1397,7 +1397,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
-				     void *caller)
+				     const void *caller)
 {
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, caller);
@@ -1568,9 +1568,9 @@ EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller);
+			    int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-				 pgprot_t prot, int node, void *caller)
+				 pgprot_t prot, int node, const void *caller)
 {
 	const int order = 0;
 	struct page **pages;
@@ -1643,7 +1643,7 @@ fail:
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
-			pgprot_t prot, int node, void *caller)
+			pgprot_t prot, int node, const void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1699,7 +1699,7 @@ fail:
  */
 static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+			    int node, const void *caller)
 {
 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
 				    gfp_mask, prot, node, caller);
This patch changes the dma-mapping subsystem to use generic vmalloc areas for all consistent dma allocations. This increases the total size limit of consistent allocations and removes platform hacks and a lot of duplicated code.
Atomic allocations are served from a special pool preallocated at boot, because vmalloc areas cannot be reliably created in atomic context.
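In short, the allocation path now dispatches on the gfp flags; a condensed sketch of the resulting __dma_alloc() logic (see the full diff below):

	if (arch_is_coherent() || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (gfp & GFP_ATOMIC)
		/* no vmalloc area in atomic context: carve from the boot-time pool */
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page);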
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 Documentation/kernel-parameters.txt |    2 +-
 arch/arm/include/asm/dma-mapping.h  |    2 +-
 arch/arm/mm/dma-mapping.c           |  505 +++++++++++++----------------
 arch/arm/mm/mm.h                    |    3 +
 include/linux/vmalloc.h             |    1 +
 mm/vmalloc.c                        |   10 +-
 6 files changed, 194 insertions(+), 329 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index a92c5eb..da07f6c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -526,7 +526,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
coherent_pool=nn[KMG] [ARM,KNL] Sets the size of memory pool for coherent, atomic dma - allocations if Contiguous Memory Allocator (CMA) is used. + allocations.
code_bytes [X86] How many bytes of object code to print in an oops report. diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bbef15d..80777d87 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -226,7 +226,7 @@ static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struc * DMA region above it's default value of 2MB. It must be called before the * memory allocator is initialised, i.e. before any core_initcall. */ -extern void __init init_consistent_dma_size(unsigned long size); +static inline void init_consistent_dma_size(unsigned long size) { }
/* * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index d766e42..c1f2294 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -22,6 +22,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/iommu.h> +#include <linux/io.h> #include <linux/vmalloc.h>
#include <asm/memory.h> @@ -217,115 +218,67 @@ static void __dma_free_buffer(struct page *page, size_t size) }
#ifdef CONFIG_MMU - -#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT) -#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT) - -/* - * These are the page tables (2MB each) covering uncached, DMA consistent allocations - */ -static pte_t **consistent_pte; - -#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M - -static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; - -void __init init_consistent_dma_size(unsigned long size) -{ - unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M); - - BUG_ON(consistent_pte); /* Check we're called before DMA region init */ - BUG_ON(base < VMALLOC_END); - - /* Grow region to accommodate specified size */ - if (base < consistent_base) - consistent_base = base; -} - -#include "vmregion.h" - -static struct arm_vmregion_head consistent_head = { - .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), - .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), - .vm_end = CONSISTENT_END, -}; - #ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif
-/* - * Initialise the consistent memory allocation. - */ -static int __init consistent_init(void) -{ - int ret = 0; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - int i = 0; - unsigned long base = consistent_base; - unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; - - if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) - return 0; - - consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); - if (!consistent_pte) { - pr_err("%s: no memory\n", __func__); - return -ENOMEM; - } - - pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END); - consistent_head.vm_start = base; - - do { - pgd = pgd_offset(&init_mm, base); - - pud = pud_alloc(&init_mm, pgd, base); - if (!pud) { - pr_err("%s: no pud tables\n", __func__); - ret = -ENOMEM; - break; - } - - pmd = pmd_alloc(&init_mm, pud, base); - if (!pmd) { - pr_err("%s: no pmd tables\n", __func__); - ret = -ENOMEM; - break; - } - WARN_ON(!pmd_none(*pmd)); - - pte = pte_alloc_kernel(pmd, base); - if (!pte) { - pr_err("%s: no pte tables\n", __func__); - ret = -ENOMEM; - break; - } - - consistent_pte[i++] = pte; - base += PMD_SIZE; - } while (base < CONSISTENT_END); - - return ret; -} -core_initcall(consistent_init); - static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page);
-static struct arm_vmregion_head coherent_head = { - .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock), - .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), +static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, + pgprot_t prot, struct page **ret_page, + const void *caller); + +static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, + const void *caller) +{ + struct vm_struct *area; + unsigned long addr; + + area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, + caller); + if (!area) + return NULL; + addr = (unsigned long)area->addr; + area->phys_addr = __pfn_to_phys(page_to_pfn(page)); + + if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { + vunmap((void *)addr); + return NULL; + } + return (void *)addr; +} + +static void __dma_free_remap(void *cpu_addr, size_t size) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + if (!area || !(area->flags & VM_ARM_DMA_CONSISTENT)) { + pr_err("%s: trying to free invalid coherent area: %p\n", + __func__, cpu_addr); + dump_stack(); + return; + } + unmap_kernel_range((unsigned long)cpu_addr, size); + vunmap(cpu_addr); +} + +struct dma_pool { + size_t size; + spinlock_t lock; + unsigned long *bitmap; + unsigned long nr_pages; + void *vaddr; + struct page *page; };
-static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; +static struct dma_pool atomic_pool = { + .size = SZ_256K, +};
static int __init early_coherent_pool(char *p) { - coherent_pool_size = memparse(p, &p); + atomic_pool.size = memparse(p, &p); return 0; } early_param("coherent_pool", early_coherent_pool); @@ -333,32 +286,45 @@ early_param("coherent_pool", early_coherent_pool); /* * Initialise the coherent pool for atomic allocations. */ -static int __init coherent_init(void) +static int __init atomic_pool_init(void) { + struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); - size_t size = coherent_pool_size; + unsigned long nr_pages = pool->size >> PAGE_SHIFT; + unsigned long *bitmap; struct page *page; void *ptr; + int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
- if (!IS_ENABLED(CONFIG_CMA)) - return 0; + bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!bitmap) + goto no_bitmap;
- ptr = __alloc_from_contiguous(NULL, size, prot, &page); + if (IS_ENABLED(CONFIG_CMA)) + ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); + else + ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, + &page, NULL); if (ptr) { - coherent_head.vm_start = (unsigned long) ptr; - coherent_head.vm_end = (unsigned long) ptr + size; - printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n", - (unsigned)size / 1024); + spin_lock_init(&pool->lock); + pool->vaddr = ptr; + pool->page = page; + pool->bitmap = bitmap; + pool->nr_pages = nr_pages; + pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", + (unsigned)pool->size / 1024); return 0; } - printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", - (unsigned)size / 1024); + kfree(bitmap); +no_bitmap: + pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", + (unsigned)pool->size / 1024); return -ENOMEM; } /* * CMA is activated by core_initcall, so we must be called after it. */ -postcore_initcall(coherent_init); +postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve { phys_addr_t base; @@ -406,112 +372,6 @@ void __init dma_contiguous_remap(void) } }
-static void * -__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, - const void *caller) -{ - struct arm_vmregion *c; - size_t align; - int bit; - - if (!consistent_pte) { - pr_err("%s: not initialised\n", __func__); - dump_stack(); - return NULL; - } - - /* - * Align the virtual region allocation - maximum alignment is - * a section size, minimum is a page size. This helps reduce - * fragmentation of the DMA space, and also prevents allocations - * smaller than a section from crossing a section boundary. - */ - bit = fls(size - 1); - if (bit > SECTION_SHIFT) - bit = SECTION_SHIFT; - align = 1 << bit; - - /* - * Allocate a virtual address in the consistent mapping region. - */ - c = arm_vmregion_alloc(&consistent_head, align, size, - gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller); - if (c) { - pte_t *pte; - int idx = CONSISTENT_PTE_INDEX(c->vm_start); - u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); - - pte = consistent_pte[idx] + off; - c->priv = page; - - do { - BUG_ON(!pte_none(*pte)); - - set_pte_ext(pte, mk_pte(page, prot), 0); - page++; - pte++; - off++; - if (off >= PTRS_PER_PTE) { - off = 0; - pte = consistent_pte[++idx]; - } - } while (size -= PAGE_SIZE); - - dsb(); - - return (void *)c->vm_start; - } - return NULL; -} - -static void __dma_free_remap(void *cpu_addr, size_t size) -{ - struct arm_vmregion *c; - unsigned long addr; - pte_t *ptep; - int idx; - u32 off; - - c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); - if (!c) { - pr_err("%s: trying to free invalid coherent area: %p\n", - __func__, cpu_addr); - dump_stack(); - return; - } - - if ((c->vm_end - c->vm_start) != size) { - pr_err("%s: freeing wrong coherent size (%ld != %d)\n", - __func__, c->vm_end - c->vm_start, size); - dump_stack(); - size = c->vm_end - c->vm_start; - } - - idx = CONSISTENT_PTE_INDEX(c->vm_start); - off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); - ptep = consistent_pte[idx] + off; - addr = c->vm_start; - do { - pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); - - ptep++; - addr += PAGE_SIZE; - off++; - if (off >= PTRS_PER_PTE) { - off = 0; - ptep = consistent_pte[++idx]; - } - - if (pte_none(pte) || !pte_present(pte)) - pr_crit("%s: bad page in kernel page table\n", - __func__); - } while (size -= PAGE_SIZE); - - flush_tlb_kernel_range(c->vm_start, c->vm_end); - - arm_vmregion_free(&consistent_head, c); -} - static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, void *data) { @@ -552,15 +412,17 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, return ptr; }
-static void *__alloc_from_pool(struct device *dev, size_t size, - struct page **ret_page, const void *caller) +static void *__alloc_from_pool(size_t size, struct page **ret_page) { - struct arm_vmregion *c; + struct dma_pool *pool = &atomic_pool; + unsigned int count = size >> PAGE_SHIFT; + unsigned int pageno; + unsigned long flags; + void *ptr = NULL; size_t align;
- if (!coherent_head.vm_start) { - printk(KERN_ERR "%s: coherent pool not initialised!\n", - __func__); + if (!pool->vaddr) { + pr_err("%s: coherent pool not initialised!\n", __func__); dump_stack(); return NULL; } @@ -571,35 +433,42 @@ static void *__alloc_from_pool(struct device *dev, size_t size, * size. This helps reduce fragmentation of the DMA space. */ align = PAGE_SIZE << get_order(size); - c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller); - if (c) { - void *ptr = (void *)c->vm_start; - struct page *page = virt_to_page(ptr); - *ret_page = page; - return ptr; + + spin_lock_irqsave(&pool->lock, flags); + pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, + 0, count, (1 << align) - 1); + if (pageno < pool->nr_pages) { + bitmap_set(pool->bitmap, pageno, count); + ptr = pool->vaddr + PAGE_SIZE * pageno; + *ret_page = pool->page + pageno; } - return NULL; + spin_unlock_irqrestore(&pool->lock, flags); + + return ptr; }
-static int __free_from_pool(void *cpu_addr, size_t size) +static int __free_from_pool(void *start, size_t size) { - unsigned long start = (unsigned long)cpu_addr; - unsigned long end = start + size; - struct arm_vmregion *c; + struct dma_pool *pool = &atomic_pool; + unsigned long pageno, count; + unsigned long flags;
- if (start < coherent_head.vm_start || end > coherent_head.vm_end) + if (start < pool->vaddr || start > pool->vaddr + pool->size) return 0;
- c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start); - - if ((c->vm_end - c->vm_start) != size) { - printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", - __func__, c->vm_end - c->vm_start, size); + if (start + size > pool->vaddr + pool->size) { + pr_err("%s: freeing wrong coherent size from pool\n", __func__); dump_stack(); - size = c->vm_end - c->vm_start; + return 0; }
- arm_vmregion_free(&coherent_head, c); + pageno = (start - pool->vaddr) >> PAGE_SHIFT; + count = size >> PAGE_SHIFT; + + spin_lock_irqsave(&pool->lock, flags); + bitmap_clear(pool->bitmap, pageno, count); + spin_unlock_irqrestore(&pool->lock, flags); + return 1; }
@@ -702,10 +571,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (arch_is_coherent() || nommu()) addr = __alloc_simple_buffer(dev, size, gfp, &page); + else if (gfp & GFP_ATOMIC) + addr = __alloc_from_pool(size, &page); else if (!IS_ENABLED(CONFIG_CMA)) addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); - else if (gfp & GFP_ATOMIC) - addr = __alloc_from_pool(dev, size, &page, caller); else addr = __alloc_from_contiguous(dev, size, prot, &page);
@@ -741,16 +610,22 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, { int ret = -ENXIO; #ifdef CONFIG_MMU + unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_pfn(dev, dma_addr); + unsigned long off = vma->vm_pgoff; + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) return ret;
- ret = remap_pfn_range(vma, vma->vm_start, - pfn + vma->vm_pgoff, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); + if (off < count && user_count <= (count - off)) { + ret = remap_pfn_range(vma, vma->vm_start, + pfn + off, + user_count << PAGE_SHIFT, + vma->vm_page_prot); + } #endif /* CONFIG_MMU */
return ret; @@ -998,9 +873,6 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
static int __init dma_debug_do_init(void) { -#ifdef CONFIG_MMU - arm_vmregion_create_proc("dma-mappings", &consistent_head); -#endif dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } @@ -1117,61 +989,32 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s * Create a CPU mapping for a specified pages */ static void * -__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot) +__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, + const void *caller) { - struct arm_vmregion *c; - size_t align; - size_t count = size >> PAGE_SHIFT; - int bit; + unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct vm_struct *area; + unsigned long p;
- if (!consistent_pte[0]) { - pr_err("%s: not initialised\n", __func__); - dump_stack(); + area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, + caller); + if (!area) return NULL; - } - - /* - * Align the virtual region allocation - maximum alignment is - * a section size, minimum is a page size. This helps reduce - * fragmentation of the DMA space, and also prevents allocations - * smaller than a section from crossing a section boundary. - */ - bit = fls(size - 1); - if (bit > SECTION_SHIFT) - bit = SECTION_SHIFT; - align = 1 << bit; - - /* - * Allocate a virtual address in the consistent mapping region. - */ - c = arm_vmregion_alloc(&consistent_head, align, size, - gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL); - if (c) { - pte_t *pte; - int idx = CONSISTENT_PTE_INDEX(c->vm_start); - int i = 0; - u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); - - pte = consistent_pte[idx] + off; - c->priv = pages; - - do { - BUG_ON(!pte_none(*pte)); - - set_pte_ext(pte, mk_pte(pages[i], prot), 0); - pte++; - off++; - i++; - if (off >= PTRS_PER_PTE) { - off = 0; - pte = consistent_pte[++idx]; - } - } while (i < count);
- dsb(); + area->pages = pages; + area->nr_pages = nr_pages; + p = (unsigned long)area->addr;
- return (void *)c->vm_start; + for (i = 0; i < nr_pages; i++) { + phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); + if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) + goto err; + p += PAGE_SIZE; } + return area->addr; +err: + unmap_kernel_range((unsigned long)area->addr, size); + vunmap(area->addr); return NULL; }
@@ -1230,6 +1073,16 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si return 0; }
+static struct page **__iommu_get_pages(void *cpu_addr) +{ + struct vm_struct *area; + + area = find_vm_area(cpu_addr); + if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) + return area->pages; + return NULL; +} + static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { @@ -1248,7 +1101,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, if (*handle == DMA_ERROR_CODE) goto err_buffer;
- addr = __iommu_alloc_remap(pages, size, gfp, prot); + addr = __iommu_alloc_remap(pages, size, gfp, prot, + __builtin_return_address(0)); if (!addr) goto err_mapping;
@@ -1265,31 +1119,25 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { - struct arm_vmregion *c; + unsigned long uaddr = vma->vm_start; + unsigned long usize = vma->vm_end - vma->vm_start; + struct page **pages = __iommu_get_pages(cpu_addr);
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); - c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) { - struct page **pages = c->priv; + if (!pages) + return -ENXIO;
- unsigned long uaddr = vma->vm_start; - unsigned long usize = vma->vm_end - vma->vm_start; - int i = 0; + do { + int ret = vm_insert_page(vma, uaddr, *pages++); + if (ret) { + pr_err("Remapping memory failed: %d\n", ret); + return ret; + } + uaddr += PAGE_SIZE; + usize -= PAGE_SIZE; + } while (usize > 0);
- do { - int ret; - - ret = vm_insert_page(vma, uaddr, pages[i++]); - if (ret) { - pr_err("Remapping memory, error: %d\n", ret); - return ret; - } - - uaddr += PAGE_SIZE; - usize -= PAGE_SIZE; - } while (usize > 0); - } return 0; }
@@ -1300,16 +1148,21 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { - struct arm_vmregion *c; + struct page **pages = __iommu_get_pages(cpu_addr); size = PAGE_ALIGN(size);
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); - if (c) { - struct page **pages = c->priv; - __dma_free_remap(cpu_addr, size); - __iommu_remove_mapping(dev, handle, size); - __iommu_free_buffer(dev, pages, size); + if (!pages) { + pr_err("%s: trying to free invalid coherent area: %p\n", + __func__, cpu_addr); + dump_stack(); + return; } + + unmap_kernel_range((unsigned long)cpu_addr, size); + vunmap(cpu_addr); + + __iommu_remove_mapping(dev, handle, size); + __iommu_free_buffer(dev, pages, size); }
/* diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index c471436..d9b47a1 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -59,6 +59,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #define VM_ARM_MTYPE(mt) ((mt) << 20) #define VM_ARM_MTYPE_MASK (0x1f << 20)
+/* consistent regions used by dma_alloc_attrs() */ +#define VM_ARM_DMA_CONSISTENT 0x20000000 + #endif
#ifdef CONFIG_ZONE_DMA diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 2e28f4d..6071e91 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -93,6 +93,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long start, unsigned long end, const void *caller); extern struct vm_struct *remove_vm_area(const void *addr); +extern struct vm_struct *find_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 11308f0..65fc4dc 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1403,7 +1403,15 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, -1, GFP_KERNEL, caller); }
-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ * find_vm_area - find a continuous kernel virtual area
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
 {
 	struct vmap_area *va;
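A minimal sketch of how the newly exported find_vm_area() is then used on the ARM side, condensed from the __dma_free_remap() hunk above:

	struct vm_struct *area = find_vm_area(cpu_addr);

	/* only areas created by __dma_alloc_remap() may be torn down here */
	if (!area || !(area->flags & VM_ARM_DMA_CONSISTENT))
		return;
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);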
On 06/25/2012 05:47 PM, Marek Szyprowski wrote:
This patch changes dma-mapping subsystem to use generic vmalloc areas for all consistent dma allocations. This increases the total size limit of the consistent allocations and removes platform hacks and a lot of duplicated code.
Atomic allocations are served from special pool preallocated on boot, becasue vmalloc areas cannot be reliably created in atomic context.
s/becasue/because
Signed-off-by: Marek Szyprowski m.szyprowski@samsung.com Reviewed-by: Kyungmin Park kyungmin.park@samsung.com
I like this patch very much, but let me point out one thing again. Here it goes, again: https://lkml.org/lkml/2012/6/13/628 It's another hack for avoiding vmalloc in atomic context. There have been several grumblers before, and everybody made their own mempool, so the code began to bloat and it would be error-prone. :( I think it's a TODO for the mm folks to decide.
Anyway, there are some trivial comments below.
Documentation/kernel-parameters.txt | 2 +- arch/arm/include/asm/dma-mapping.h | 2 +- arch/arm/mm/dma-mapping.c | 505 +++++++++++++---------------------- arch/arm/mm/mm.h | 3 + include/linux/vmalloc.h | 1 + mm/vmalloc.c | 10 +- 6 files changed, 194 insertions(+), 329 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index a92c5eb..da07f6c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -526,7 +526,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. coherent_pool=nn[KMG] [ARM,KNL] Sets the size of memory pool for coherent, atomic dma
allocations if Contiguous Memory Allocator (CMA) is used.
allocations.
What's the default? 256K? If we spelled it out here, couldn't it help?
code_bytes [X86] How many bytes of object code to print in an oops report. diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bbef15d..80777d87 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -226,7 +226,7 @@ static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struc
- DMA region above it's default value of 2MB. It must be called before the
- memory allocator is initialised, i.e. before any core_initcall.
*/ -extern void __init init_consistent_dma_size(unsigned long size); +static inline void init_consistent_dma_size(unsigned long size) { } /*
- For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index d766e42..c1f2294 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -22,6 +22,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/iommu.h> +#include <linux/io.h> #include <linux/vmalloc.h> #include <asm/memory.h> @@ -217,115 +218,67 @@ static void __dma_free_buffer(struct page *page, size_t size) } #ifdef CONFIG_MMU
-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT) -#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
-/*
- These are the page tables (2MB each) covering uncached, DMA consistent allocations
- */
-static pte_t **consistent_pte;
-#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
-static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
-void __init init_consistent_dma_size(unsigned long size) -{
- unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
- BUG_ON(consistent_pte); /* Check we're called before DMA region init */
- BUG_ON(base < VMALLOC_END);
- /* Grow region to accommodate specified size */
- if (base < consistent_base)
consistent_base = base;
-}
-#include "vmregion.h"
-static struct arm_vmregion_head consistent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
- .vm_end = CONSISTENT_END,
-};
#ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif -/*
- Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void) -{
- int ret = 0;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i = 0;
- unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
- if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
return 0;
- consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
- if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
return -ENOMEM;
- }
- pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
- consistent_head.vm_start = base;
- do {
pgd = pgd_offset(&init_mm, base);
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
consistent_pte[i++] = pte;
base += PMD_SIZE;
- } while (base < CONSISTENT_END);
- return ret;
-} -core_initcall(consistent_init);
static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page); -static struct arm_vmregion_head coherent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller);
+static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
+{
- struct vm_struct *area;
- unsigned long addr;
- area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
caller);
Please write down why we always need VM_USERMAP. If we always need it on ARM, why don't you define the following? #define VM_ARM_DMA_CONSISTENT (0x20000000 | VM_USERMAP) Although it's trivial, it could be more understandable, so that everybody can see that "an ARM DMA allocation can be mapped by user space, every time".
- if (!area)
return NULL;
- addr = (unsigned long)area->addr;
- area->phys_addr = __pfn_to_phys(page_to_pfn(page));
- if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
vunmap((void *)addr);
return NULL;
- }
- return (void *)addr;
+}
+static void __dma_free_remap(void *cpu_addr, size_t size) +{
- struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || !(area->flags & VM_ARM_DMA_CONSISTENT)) {
The define suggested above could enhance this check, too.
pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
- }
- unmap_kernel_range((unsigned long)cpu_addr, size);
- vunmap(cpu_addr);
+}
+struct dma_pool {
- size_t size;
- spinlock_t lock;
- unsigned long *bitmap;
- unsigned long nr_pages;
- void *vaddr;
- struct page *page;
};
-static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static struct dma_pool atomic_pool = {
+	.size = SZ_256K,
+};

 static int __init early_coherent_pool(char *p)
 {
-	coherent_pool_size = memparse(p, &p);
+	atomic_pool.size = memparse(p, &p);
 	return 0;
} early_param("coherent_pool", early_coherent_pool); @@ -333,32 +286,45 @@ early_param("coherent_pool", early_coherent_pool); /*
- Initialise the coherent pool for atomic allocations.
*/ -static int __init coherent_init(void) +static int __init atomic_pool_init(void) {
- struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
- size_t size = coherent_pool_size;
- unsigned long nr_pages = pool->size >> PAGE_SHIFT;
- unsigned long *bitmap; struct page *page; void *ptr;
- int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
- if (!IS_ENABLED(CONFIG_CMA))
return 0;
- bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!bitmap)
goto no_bitmap;
- ptr = __alloc_from_contiguous(NULL, size, prot, &page);
- if (IS_ENABLED(CONFIG_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
- else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
				&page, NULL);
	if (ptr) {
coherent_head.vm_start = (unsigned long) ptr;
coherent_head.vm_end = (unsigned long) ptr + size;
printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)size / 1024);
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
pool->page = page;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}
- printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
(unsigned)size / 1024);
- kfree(bitmap);
+no_bitmap:
- pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
		(unsigned)pool->size / 1024);
	return -ENOMEM;
} /*
- CMA is activated by core_initcall, so we must be called after it.
*/ -postcore_initcall(coherent_init); +postcore_initcall(atomic_pool_init); struct dma_contig_early_reserve { phys_addr_t base; @@ -406,112 +372,6 @@ void __init dma_contiguous_remap(void) } } -static void * -__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- struct arm_vmregion *c;
- size_t align;
- int bit;
- if (!consistent_pte) {
pr_err("%s: not initialised\n", __func__);
dump_stack();
return NULL;
- }
- /*
* Align the virtual region allocation - maximum alignment is
* a section size, minimum is a page size. This helps reduce
* fragmentation of the DMA space, and also prevents allocations
* smaller than a section from crossing a section boundary.
*/
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
bit = SECTION_SHIFT;
- align = 1 << bit;
- /*
* Allocate a virtual address in the consistent mapping region.
*/
- c = arm_vmregion_alloc(&consistent_head, align, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
- if (c) {
pte_t *pte;
int idx = CONSISTENT_PTE_INDEX(c->vm_start);
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
pte = consistent_pte[idx] + off;
c->priv = page;
do {
BUG_ON(!pte_none(*pte));
set_pte_ext(pte, mk_pte(page, prot), 0);
page++;
pte++;
off++;
if (off >= PTRS_PER_PTE) {
off = 0;
pte = consistent_pte[++idx];
}
} while (size -= PAGE_SIZE);
dsb();
return (void *)c->vm_start;
- }
- return NULL;
-}
-static void __dma_free_remap(void *cpu_addr, size_t size) -{
- struct arm_vmregion *c;
- unsigned long addr;
- pte_t *ptep;
- int idx;
- u32 off;
- c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
- if (!c) {
pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
- }
- if ((c->vm_end - c->vm_start) != size) {
pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
- }
- idx = CONSISTENT_PTE_INDEX(c->vm_start);
- off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
- ptep = consistent_pte[idx] + off;
- addr = c->vm_start;
- do {
pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
ptep++;
addr += PAGE_SIZE;
off++;
if (off >= PTRS_PER_PTE) {
off = 0;
ptep = consistent_pte[++idx];
}
if (pte_none(pte) || !pte_present(pte))
pr_crit("%s: bad page in kernel page table\n",
__func__);
- } while (size -= PAGE_SIZE);
- flush_tlb_kernel_range(c->vm_start, c->vm_end);
- arm_vmregion_free(&consistent_head, c);
-}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, void *data) { @@ -552,15 +412,17 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, return ptr; } -static void *__alloc_from_pool(struct device *dev, size_t size,
struct page **ret_page, const void *caller)
+static void *__alloc_from_pool(size_t size, struct page **ret_page) {
- struct arm_vmregion *c;
- struct dma_pool *pool = &atomic_pool;
- unsigned int count = size >> PAGE_SHIFT;
Just out of curiosity: do we make sure size is always aligned to PAGE_SIZE? If so, please write that down.
- unsigned int pageno;
- unsigned long flags;
- void *ptr = NULL; size_t align;
- if (!coherent_head.vm_start) {
printk(KERN_ERR "%s: coherent pool not initialised!\n",
__func__);
- if (!pool->vaddr) {
		pr_err("%s: coherent pool not initialised!\n", __func__);
		dump_stack();
Why don't you use WARN?
return NULL;
}
<snip>
Hello,
On Wednesday, June 27, 2012 3:55 AM Minchan Kim wrote:
On 06/25/2012 05:47 PM, Marek Szyprowski wrote:
This patch changes dma-mapping subsystem to use generic vmalloc areas for all consistent dma allocations. This increases the total size limit of the consistent allocations and removes platform hacks and a lot of duplicated code.
Atomic allocations are served from special pool preallocated on boot, becasue vmalloc areas cannot be reliably created in atomic context.
s/becasue/because
Signed-off-by: Marek Szyprowski m.szyprowski@samsung.com Reviewed-by: Kyungmin Park kyungmin.park@samsung.com
I like this patch very much but notice one more again. Here is goes, again. https://lkml.org/lkml/2012/6/13/628 It's another hack for avoding vmalloc atomic. There are several grumblers ago and everybody made own mempool so code began to bloat and it would be error-prone. :( I think it's TODO for mm folks to decide.
I've read the discussion. This case is very similar: we also might need to be called from atomic context. I hope that this issue will be resolved by the mm developers one day, so that my pool-based workaround can be removed.
Anyway, there are some trivial comment below.
Thanks, I will send an updated version soon.
Documentation/kernel-parameters.txt | 2 +- arch/arm/include/asm/dma-mapping.h | 2 +- arch/arm/mm/dma-mapping.c | 505 +++++++++++++---------------------- arch/arm/mm/mm.h | 3 + include/linux/vmalloc.h | 1 + mm/vmalloc.c | 10 +- 6 files changed, 194 insertions(+), 329 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index a92c5eb..da07f6c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -526,7 +526,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
coherent_pool=nn[KMG] [ARM,KNL] Sets the size of memory pool for coherent, atomic dma
allocations if Contiguous Memory Allocator (CMA) is used.
allocations.
What's the default? 256K? If we spelled it out here, couldn't it help?
Good idea.
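For example, something along these lines (exact wording to be settled in the next respin):

	coherent_pool=nn[KMG]	[ARM,KNL]
			Sets the size of memory pool for coherent, atomic dma
			allocations. Default: 256 KiB.

so that overriding it on the kernel command line, e.g. with coherent_pool=1M, is obviously related to the documented default.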
code_bytes [X86] How many bytes of object code to print in an oops report. diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bbef15d..80777d87 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -226,7 +226,7 @@ static inline int dma_mmap_writecombine(struct device *dev, struct
vm_area_struc
- DMA region above it's default value of 2MB. It must be called before the
- memory allocator is initialised, i.e. before any core_initcall.
*/ -extern void __init init_consistent_dma_size(unsigned long size); +static inline void init_consistent_dma_size(unsigned long size) { }
/*
- For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index d766e42..c1f2294 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -22,6 +22,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/iommu.h> +#include <linux/io.h> #include <linux/vmalloc.h>
#include <asm/memory.h> @@ -217,115 +218,67 @@ static void __dma_free_buffer(struct page *page, size_t size) }
#ifdef CONFIG_MMU
-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT) -#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
-/*
- These are the page tables (2MB each) covering uncached, DMA consistent allocations
- */
-static pte_t **consistent_pte;
-#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
-static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
-void __init init_consistent_dma_size(unsigned long size) -{
- unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
- BUG_ON(consistent_pte); /* Check we're called before DMA region init */
- BUG_ON(base < VMALLOC_END);
- /* Grow region to accommodate specified size */
- if (base < consistent_base)
consistent_base = base;
-}
-#include "vmregion.h"
-static struct arm_vmregion_head consistent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
- .vm_end = CONSISTENT_END,
-};
#ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif
-/*
- Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void) -{
- int ret = 0;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i = 0;
- unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
- if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
return 0;
- consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
- if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
return -ENOMEM;
- }
- pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
- consistent_head.vm_start = base;
- do {
pgd = pgd_offset(&init_mm, base);
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
consistent_pte[i++] = pte;
base += PMD_SIZE;
- } while (base < CONSISTENT_END);
- return ret;
-} -core_initcall(consistent_init);
static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page);
-static struct arm_vmregion_head coherent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller);
+static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
+{
- struct vm_struct *area;
- unsigned long addr;
- area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
caller);
Please write down why we always need VM_USERMAP. If we always need it on ARM, why don't you define the following? #define VM_ARM_DMA_CONSISTENT (0x20000000 | VM_USERMAP) Although it's trivial, it could be more understandable, so that everybody can see that "an ARM DMA allocation can be mapped by user space, every time".
The fact that a DMA allocation can be mapped to userspace is not specific to ARM, so I will keep using (VM_ARM_DMA_CONSISTENT | VM_USERMAP) and add a comment about it.
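Something like this next to the flag definition in arch/arm/mm/mm.h (a sketch of the intended comment; final wording may differ):

	/*
	 * Consistent regions created by dma_alloc_attrs(). VM_USERMAP is
	 * passed separately when the area is created, because "mappable
	 * to user space" is not an ARM-specific property.
	 */
	#define VM_ARM_DMA_CONSISTENT	0x20000000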
- if (!area)
return NULL;
- addr = (unsigned long)area->addr;
- area->phys_addr = __pfn_to_phys(page_to_pfn(page));
- if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
vunmap((void *)addr);
return NULL;
- }
- return (void *)addr;
+}
+static void __dma_free_remap(void *cpu_addr, size_t size) +{
- struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || !(area->flags & VM_ARM_DMA_CONSISTENT)) {
The define suggested above could enhance this check, too.
pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
- }
- unmap_kernel_range((unsigned long)cpu_addr, size);
- vunmap(cpu_addr);
+}
+struct dma_pool {
- size_t size;
- spinlock_t lock;
- unsigned long *bitmap;
- unsigned long nr_pages;
- void *vaddr;
- struct page *page;
};
-static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static struct dma_pool atomic_pool = {
+	.size = SZ_256K,
+};

 static int __init early_coherent_pool(char *p)
 {
-	coherent_pool_size = memparse(p, &p);
+	atomic_pool.size = memparse(p, &p);
 	return 0;
} early_param("coherent_pool", early_coherent_pool); @@ -333,32 +286,45 @@ early_param("coherent_pool", early_coherent_pool); /*
- Initialise the coherent pool for atomic allocations.
*/ -static int __init coherent_init(void) +static int __init atomic_pool_init(void) {
- struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
- size_t size = coherent_pool_size;
- unsigned long nr_pages = pool->size >> PAGE_SHIFT;
- unsigned long *bitmap; struct page *page; void *ptr;
- int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
- if (!IS_ENABLED(CONFIG_CMA))
return 0;
- bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!bitmap)
goto no_bitmap;
- ptr = __alloc_from_contiguous(NULL, size, prot, &page);
- if (IS_ENABLED(CONFIG_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
- else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
				&page, NULL);
	if (ptr) {
coherent_head.vm_start = (unsigned long) ptr;
coherent_head.vm_end = (unsigned long) ptr + size;
printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)size / 1024);
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
pool->page = page;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}
- printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
(unsigned)size / 1024);
- kfree(bitmap);
+no_bitmap:
- pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
		(unsigned)pool->size / 1024);
	return -ENOMEM;
} /*
- CMA is activated by core_initcall, so we must be called after it.
*/ -postcore_initcall(coherent_init); +postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve { phys_addr_t base; @@ -406,112 +372,6 @@ void __init dma_contiguous_remap(void) } }
-static void * -__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- struct arm_vmregion *c;
- size_t align;
- int bit;
- if (!consistent_pte) {
pr_err("%s: not initialised\n", __func__);
dump_stack();
return NULL;
- }
- /*
* Align the virtual region allocation - maximum alignment is
* a section size, minimum is a page size. This helps reduce
* fragmentation of the DMA space, and also prevents allocations
* smaller than a section from crossing a section boundary.
*/
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
bit = SECTION_SHIFT;
- align = 1 << bit;
- /*
* Allocate a virtual address in the consistent mapping region.
*/
- c = arm_vmregion_alloc(&consistent_head, align, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
- if (c) {
pte_t *pte;
int idx = CONSISTENT_PTE_INDEX(c->vm_start);
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
pte = consistent_pte[idx] + off;
c->priv = page;
do {
BUG_ON(!pte_none(*pte));
set_pte_ext(pte, mk_pte(page, prot), 0);
page++;
pte++;
off++;
if (off >= PTRS_PER_PTE) {
off = 0;
pte = consistent_pte[++idx];
}
} while (size -= PAGE_SIZE);
dsb();
return (void *)c->vm_start;
- }
- return NULL;
-}
-static void __dma_free_remap(void *cpu_addr, size_t size) -{
- struct arm_vmregion *c;
- unsigned long addr;
- pte_t *ptep;
- int idx;
- u32 off;
- c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
- if (!c) {
pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
- }
- if ((c->vm_end - c->vm_start) != size) {
pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
- }
- idx = CONSISTENT_PTE_INDEX(c->vm_start);
- off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
- ptep = consistent_pte[idx] + off;
- addr = c->vm_start;
- do {
pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
ptep++;
addr += PAGE_SIZE;
off++;
if (off >= PTRS_PER_PTE) {
off = 0;
ptep = consistent_pte[++idx];
}
if (pte_none(pte) || !pte_present(pte))
pr_crit("%s: bad page in kernel page table\n",
__func__);
- } while (size -= PAGE_SIZE);
- flush_tlb_kernel_range(c->vm_start, c->vm_end);
- arm_vmregion_free(&consistent_head, c);
-}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, void *data) { @@ -552,15 +412,17 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size,
gfp_t gfp,
return ptr; }
-static void *__alloc_from_pool(struct device *dev, size_t size,
struct page **ret_page, const void *caller)
+static void *__alloc_from_pool(size_t size, struct page **ret_page) {
- struct arm_vmregion *c;
- struct dma_pool *pool = &atomic_pool;
- unsigned int count = size >> PAGE_SHIFT;
Just out of curiosity: do we make sure size is always aligned to PAGE_SIZE? If so, please write that down.
Ok, thanks for spotting it.
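For the record: __dma_alloc() already does size = PAGE_ALIGN(size) before calling into the pool allocator, so the shift cannot drop a partial page. I will add a comment, roughly along these lines (a sketch, assuming that alignment stays where it is):

	/* size is page-aligned by __dma_alloc(), so no partial page is lost */
	unsigned int count = size >> PAGE_SHIFT;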
- unsigned int pageno;
- unsigned long flags;
- void *ptr = NULL; size_t align;
- if (!coherent_head.vm_start) {
printk(KERN_ERR "%s: coherent pool not initialised!\n",
__func__);
- if (!pool->vaddr) {
		pr_err("%s: coherent pool not initialised!\n", __func__);
		dump_stack();
Why don't you use WARN?
Ok, I will use WARN here.
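i.e. something like (a sketch):

	if (WARN(!pool->vaddr, "%s: coherent pool not initialised!\n",
		 __func__))
		return NULL;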
Best regards
This patch fixes the following build break on no-MMU ARM architectures:
arch/arm/mm/dma-mapping.c:576:39: error: macro "__alloc_from_pool" requires 4 arguments, but only 2 given
arch/arm/mm/dma-mapping.c: In function ‘__dma_alloc’:
arch/arm/mm/dma-mapping.c:576: error: ‘__alloc_from_pool’ undeclared (first use in this function)
arch/arm/mm/dma-mapping.c:576: error: (Each undeclared identifier is reported only once
arch/arm/mm/dma-mapping.c:576: error: for each function it appears in.)
make[1]: *** [arch/arm/mm/dma-mapping.o] Error 1
Reported-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 arch/arm/mm/dma-mapping.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c10dab8..c65a180 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -514,7 +514,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
-#define __alloc_from_pool(dev, size, ret_page, c)		NULL
+#define __alloc_from_pool(size, ret_page)			NULL
 #define __alloc_from_contiguous(dev, size, prot, ret)		NULL
 #define __free_from_pool(cpu_addr, size)			0
 #define __free_from_contiguous(dev, page, size)		do { } while (0)
Hi Marek,
On Mon, 25 Jun 2012 10:47:27 +0200 Marek Szyprowski m.szyprowski@samsung.com wrote:
This patch changes dma-mapping subsystem to use generic vmalloc areas for all consistent dma allocations. This increases the total size limit of the consistent allocations and removes platform hacks and a lot of duplicated code.
Atomic allocations are served from a special pool preallocated at boot, because vmalloc areas cannot be reliably created in atomic context.
Signed-off-by: Marek Szyprowski m.szyprowski@samsung.com Reviewed-by: Kyungmin Park kyungmin.park@samsung.com
Documentation/kernel-parameters.txt | 2 +- arch/arm/include/asm/dma-mapping.h | 2 +- arch/arm/mm/dma-mapping.c | 505 +++++++++++++---------------------- arch/arm/mm/mm.h | 3 + include/linux/vmalloc.h | 1 + mm/vmalloc.c | 10 +- 6 files changed, 194 insertions(+), 329 deletions(-)
......
-static int __init consistent_init(void) -{
int ret = 0;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i = 0;
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
return 0;
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
return -ENOMEM;
}
pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
consistent_head.vm_start = base;
do {
pgd = pgd_offset(&init_mm, base);
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
consistent_pte[i++] = pte;
base += PMD_SIZE;
} while (base < CONSISTENT_END);
return ret;
-} -core_initcall(consistent_init);
static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page);
-static struct arm_vmregion_head coherent_head = {
.vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
.vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller);
+static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
+{
struct vm_struct *area;
unsigned long addr;
area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
area->phys_addr = __pfn_to_phys(page_to_pfn(page));
if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
vunmap((void *)addr);
return NULL;
}
return (void *)addr;
+}
The above "ioremap_page_range()" seems to be executed against normal pages(liner kernel mapping) with setting a new prot, because pages were passed from __dma_alloc_buffer(){..alloc_pages()...}. For me, this is creating another page mapping with different pgprot, and it can cause the pgprot inconsistency. This reminds me of the following old patch.
[RFC PATCH] Avoid aliasing mappings in DMA coherent allocator http://lists.infradead.org/pipermail/linux-arm-kernel/2012-June/106815.html
I think that this is why ioremap() isn't allowed with RAM.
__arm_ioremap_pfn_caller() doesn't allow RAM remapping.
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	...
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
	...
So my questions are: 1) Is the above ioremap_page_range() creating another page mapping with a new pgprot, in addition to the linear mapping? 2) If so, is it safe with respect to the pgprot inconsistency between the different vaddrs?
I hope that my questions are making sense.
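To restate the concern as code (a purely hypothetical illustration, not part of the patch): the same physical pages end up with two kernel mappings carrying different memory attributes:

	struct page *page = alloc_pages(gfp, get_order(size));
	void *lin = page_address(page);	/* linear map: normal, cacheable */

	/* second mapping of the same pfns with e.g. pgprot_dmacoherent() */
	void *dma = __dma_alloc_remap(page, size, gfp,
				      pgprot_dmacoherent(pgprot_kernel),
				      __builtin_return_address(0));
	/* 'lin' and 'dma' now alias with mismatched attributes on ARMv6+ */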
Hello,
On Monday, July 02, 2012 1:06 PM Hiroshi Doyu wrote:
On Mon, 25 Jun 2012 10:47:27 +0200 Marek Szyprowski m.szyprowski@samsung.com wrote:
This patch changes dma-mapping subsystem to use generic vmalloc areas for all consistent dma allocations. This increases the total size limit of the consistent allocations and removes platform hacks and a lot of duplicated code.
Atomic allocations are served from special pool preallocated on boot, becasue vmalloc areas cannot be reliably created in atomic context.
Signed-off-by: Marek Szyprowski m.szyprowski@samsung.com Reviewed-by: Kyungmin Park kyungmin.park@samsung.com
Documentation/kernel-parameters.txt | 2 +- arch/arm/include/asm/dma-mapping.h | 2 +- arch/arm/mm/dma-mapping.c | 505 +++++++++++++---------------------- arch/arm/mm/mm.h | 3 + include/linux/vmalloc.h | 1 + mm/vmalloc.c | 10 +- 6 files changed, 194 insertions(+), 329 deletions(-)
......
-static int __init consistent_init(void) -{
int ret = 0;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int i = 0;
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
return 0;
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
return -ENOMEM;
}
pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
consistent_head.vm_start = base;
do {
pgd = pgd_offset(&init_mm, base);
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
consistent_pte[i++] = pte;
base += PMD_SIZE;
} while (base < CONSISTENT_END);
return ret;
-} -core_initcall(consistent_init);
static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page);
-static struct arm_vmregion_head coherent_head = {
.vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
.vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller);
+static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
+{
struct vm_struct *area;
unsigned long addr;
area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
area->phys_addr = __pfn_to_phys(page_to_pfn(page));
if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
vunmap((void *)addr);
return NULL;
}
return (void *)addr;
+}
The above "ioremap_page_range()" seems to be executed against normal pages(liner kernel mapping) with setting a new prot, because pages were passed from __dma_alloc_buffer(){..alloc_pages()...}. For me, this is creating another page mapping with different pgprot, and it can cause the pgprot inconsistency. This reminds me of the following old patch.
[RFC PATCH] Avoid aliasing mappings in DMA coherent allocator http://lists.infradead.org/pipermail/linux-arm-kernel/2012-June/106815.html
If I remember correctly, that approach was dropped because: a) it consumed a fixed, quite large amount of RAM for DMA mapping purposes only, which was considered a waste of resources; b) it didn't work with some hardware configurations which had a DMA zone smaller than 2MiB.
I think that this is why ioremap() isn't allowed with RAM.
__arm_ioremap_pfn_caller() doesn't allow RAM remapping.
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	...
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
	...
So my question is: 1) Is the above ioremap_page_range() creating another page mapping with a new pgprot, in addition to the linear mapping?
Yes, it does. My patch does exactly the same thing as the existing __dma_alloc_remap(), just using the generic vmalloc helper functions.
2) If so, is it safe with respect to the pgprot inconsistency between the different vaddrs?
It probably depends on the hardware. Right now, although the specification says this is a violation, no side effects have been observed, and such a solution has already been used for years.
I hope that my questions are making sense.
Best regards