The goal of these patches is to allow ION clients (drivers or userland applications) to use the Contiguous Memory Allocator (CMA).

For more information about CMA, see: http://lists.linaro.org/pipermail/linaro-mm-sig/2012-February/001328.html
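For illustration, once these patches are applied an in-kernel ION client can get CMA-backed, physically contiguous memory just by selecting the new heap mask. A minimal sketch, assuming an ion_client created elsewhere and the Android 3.4-era ion_alloc() prototype (the exact signature varies between ION versions):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/ion.h>

/* Sketch: allocate 1 MiB of contiguous memory from the CMA-backed
 * DMA heap; the heap mask and flags arguments follow the Android
 * 3.4-era ion_alloc() prototype and may differ in other versions. */
static int example_cma_alloc(struct ion_client *client)
{
	struct ion_handle *handle;

	handle = ion_alloc(client, 1024 * 1024, PAGE_SIZE,
			   ION_HEAP_TYPE_DMA_MASK, 0);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... share the buffer as a dma-buf, map it, etc. ... */

	ion_free(client, handle);
	return 0;
}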
patches version 10:
 - stop adding a private field in the ion_heap structure
 - put ion_heap into struct ion_cma_heap

patches version 9:
 - rebased on the Android kernel
 - make the CMA heap able to support the ION_FLAG_CACHED flag

patches version 8:
 - fix a memory leak when releasing the sg_table
 - remove virt_to_phys from ion_cma_phys

patches version 7:
 - rebased on the Android kernel
 - fix the ion Makefile
 - add the ion_cma_map_kernel function
 - remove CONFIG_CMA compilation flags from ion_heap.c

patches version 6:
 - add a private field in ion_platform_heap to pass the device linked with CMA
 - rework the CMA heap to use the private field
 - prepare the CMA heap for the incoming dma_common_get_sgtable function
   http://lists.linaro.org/pipermail/linaro-mm-sig/2012-June/002109.html
 - simplify the ion-ux500 driver

patches version 5:
 - port the patches to the Android 3.4 kernel, where ION uses dmabuf
 - add the ion_cma_heap_map_dma and ion_cma_heap_unmap_dma functions

patches version 4:
 - add the ION_HEAP_TYPE_DMA heap type to the ion_heap_type enum
 - the CMA heap is now a "native" ION heap
 - add the ion_heap_create_full function to keep backward compatibility
 - clean up the files included by the CMA heap
 - ux500-ion uses ion_heap_create_full instead of ion_heap_create

patches version 3:
 - add a private field in the ion_heap structure instead of exposing the
   ion_device structure to all heaps
 - ion_cma_heap is no longer a platform driver
 - ion_cma_heap uses the ion_heap private field to store the device pointer
   and make the link with reserved CMA regions
 - provide the ux500-ion driver and a configuration file for the Snowball
   board as an example of how to use CMA heaps

patches version 2:
 - address review comments from Andy Green
Benjamin Gaignard (2):
  gpu: ion: fix ion_platform_data definition
  gpu: ion: add CMA heap
 drivers/gpu/ion/Makefile       |   1 +
 drivers/gpu/ion/ion_cma_heap.c | 234 ++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/ion/ion_heap.c     |   6 ++
 drivers/gpu/ion/ion_priv.h     |  14 +++
 include/linux/ion.h            |   5 +-
 5 files changed, 259 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/ion/ion_cma_heap.c
Fix ion_platform_heap so it can be used in the usual way in board configuration files.
Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
---
 include/linux/ion.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 8414a6d..2ce49d0 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -105,7 +105,7 @@ struct ion_platform_heap {
  */
 struct ion_platform_data {
 	int nr;
-	struct ion_platform_heap heaps[];
+	struct ion_platform_heap *heaps;
 };
 
 /**
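With this fix a board configuration file can keep its heap descriptions in a separate static array and point the platform data at it. A minimal sketch of that usual pattern (the heap id and name below are made up for illustration):

#include <linux/kernel.h>
#include <linux/ion.h>

static struct ion_platform_heap example_heaps[] = {
	{
		.type = ION_HEAP_TYPE_SYSTEM,
		.id = 0,
		.name = "system",
	},
};

static struct ion_platform_data example_ion_data = {
	.nr = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,	/* a pointer now, not a flexible array */
};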
New heap type ION_HEAP_TYPE_DMA, where allocation is done with the dma_alloc_coherent API. The device's coherent_dma_mask must be set to DMA_BIT_MASK(32). The ion_platform_heap private field is used to retrieve the device linked to CMA; if it is NULL, the default CMA area is used. ion_cma_get_sgtable is a copy of the dma_common_get_sgtable function, which should land in kernel 3.5.
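For example, a board file could wire a device to the CMA heap roughly as follows. This is only a sketch: the device and variable names are hypothetical, and the CMA region is assumed to have been reserved at boot (e.g. with dma_declare_contiguous()):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/ion.h>

static struct device example_cma_dev;	/* hypothetical device tied to a CMA region */

static struct ion_platform_heap example_cma_heap = {
	.type = ION_HEAP_TYPE_DMA,
	.id = 1,
	.name = "cma",
	.priv = &example_cma_dev,	/* NULL would select the default CMA area */
};

static void example_cma_setup(void)
{
	/* dma_alloc_coherent() needs a usable coherent DMA mask */
	example_cma_dev.coherent_dma_mask = DMA_BIT_MASK(32);
}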
Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
---
 drivers/gpu/ion/Makefile       |   1 +
 drivers/gpu/ion/ion_cma_heap.c | 234 ++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/ion/ion_heap.c     |   6 ++
 drivers/gpu/ion/ion_priv.h     |  14 +++
 include/linux/ion.h            |   3 +
 5 files changed, 258 insertions(+)
 create mode 100644 drivers/gpu/ion/ion_cma_heap.c
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 306fff9..1bd9c6a 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
 			ion_carveout_heap.o ion_chunk_heap.o
+obj-$(CONFIG_ION_CMA) += ion_cma_heap.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
new file mode 100644
index 0000000..a3e0b54
--- /dev/null
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -0,0 +1,234 @@
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: benjamin.gaignard@linaro.org for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_heap {
+	struct ion_heap heap;
+	struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it will be available.
+ */
+int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
+
+/*
+ * Create scatter-list for each page of the already allocated DMA buffer.
+ */
+int ion_cma_get_sgtable_per_page(struct device *dev, struct sg_table *sgt,
+			void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret, i;
+	struct scatterlist *sg;
+
+	ret = sg_alloc_table(sgt, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg = sgt->sgl;
+	for (i = 0; i < (PAGE_ALIGN(size) / PAGE_SIZE); i++) {
+		page = virt_to_page(cpu_addr + (i * PAGE_SIZE));
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+		sg = sg_next(sg);
+	}
+	return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info;
+
+	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 0);
+
+	if (!info->cpu_addr) {
+		dev_err(dev, "Fail to allocate buffer\n");
+		goto err;
+	}
+
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(dev, "Fail to allocate sg table\n");
+		goto free_mem;
+	}
+
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		if (ion_cma_get_sgtable_per_page
+			(dev, info->table, info->cpu_addr, info->handle, len))
+			goto free_table;
+	} else {
+		if (ion_cma_get_sgtable
+			(dev, info->table, info->cpu_addr, info->handle, len))
+			goto free_table;
+	}
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	return 0;
+
+free_table:
+	kfree(info->table);
+free_mem:
+	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Release buffer %p\n", buffer);
+	/* release memory */
+	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+	/* release sg table */
+	sg_free_table(info->table);
+	kfree(info->table);
+	kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
+		info->handle);
+
+	*addr = info->handle;
+	*len = buffer->size;
+
+	return 0;
+}
+
+struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+				      struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+			    struct ion_buffer *buffer)
+{
+	return;
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+				 buffer->size);
+}
+
+void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+	/* kernel memory mapping has been done at allocation time */
+	return info->cpu_addr;
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+	.allocate = ion_cma_allocate,
+	.free = ion_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_cma_heap *cma_heap;
+
+	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+	if (!cma_heap)
+		return ERR_PTR(-ENOMEM);
+
+	cma_heap->heap.ops = &ion_cma_ops;
+	/* get device from private heaps data, later it will be
+	 * used to make the link with reserved CMA memory */
+	cma_heap->dev = data->priv;
+	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+	return &cma_heap->heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+	kfree(cma_heap);
+}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 225ef94..6d68182 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -147,6 +147,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 	case ION_HEAP_TYPE_CHUNK:
 		heap = ion_chunk_heap_create(heap_data);
 		break;
+	case ION_HEAP_TYPE_DMA:
+		heap = ion_cma_heap_create(heap_data);
+		break;
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap_data->type);
@@ -183,6 +186,9 @@ void ion_heap_destroy(struct ion_heap *heap)
 	case ION_HEAP_TYPE_CHUNK:
 		ion_chunk_heap_destroy(heap);
 		break;
+	case ION_HEAP_TYPE_DMA:
+		ion_cma_heap_destroy(heap);
+		break;
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap->type);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 50568147..da82c53 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -232,6 +232,20 @@ ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
 				      unsigned long align);
 void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 		       unsigned long size);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *heap);
+void ion_cma_heap_destroy(struct ion_heap *heap);
+#else
+static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap
+						   *heap)
+{
+	return NULL;
+}
+
+static inline void ion_cma_heap_destroy(struct ion_heap *heap) {};
+#endif
+
 /**
  * The carveout heap returns physical addresses, since 0 may be a valid
  * physical address, this is used to indicate allocation failed
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 2ce49d0..440f4b3 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -27,6 +27,7 @@ struct ion_handle;
  * @ION_HEAP_TYPE_CARVEOUT:	memory allocated from a prereserved
  *				carveout heap, allocations are physically
  *				contiguous
+ * @ION_HEAP_TYPE_DMA:		memory allocated via DMA API
  * @ION_NUM_HEAPS:		helper for iterating over heaps, a bit mask
  *				is used to identify the heaps, so only 32
  *				total heap types are supported
@@ -36,6 +37,7 @@ enum ion_heap_type {
 	ION_HEAP_TYPE_SYSTEM_CONTIG,
 	ION_HEAP_TYPE_CARVEOUT,
 	ION_HEAP_TYPE_CHUNK,
+	ION_HEAP_TYPE_DMA,
 	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
 				 are at the end of this enum */
 	ION_NUM_HEAPS = 16,
@@ -44,6 +46,7 @@ enum ion_heap_type {
 #define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
 #define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
 #define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)
 
 #define ION_NUM_HEAP_IDS		sizeof(unsigned int) * 8