Extend dirty bit per PAGE_SIZE

Page-wise cache flush is supported and only takes effect for the dirty
pages of a buffer.
Signed-off-by: Zhangfei Gao <zhangfei.gao@marvell.com>
---
 drivers/gpu/ion/ion_carveout_heap.c |   23 +++++++++++++++++------
 1 files changed, 17 insertions(+), 6 deletions(-)
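Note for reviewers (not part of the patch): below is a minimal sketch of the
flush path the per-page scatterlist is intended to enable. The dirty bitmap
and the helper name are placeholders rather than actual ION interfaces, and
it assumes carveout physical addresses can be used directly as DMA addresses;
only the per-page iteration reflects what this change provides.

/* Illustrative sketch only -- the helper name and dirty bitmap are placeholders. */
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_sync_dirty_pages(struct device *dev, struct sg_table *table,
				     unsigned long *dirty, /* one bit per page */
				     enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/*
	 * One scatterlist entry per page (as set up by
	 * ion_carveout_heap_map_dma in this patch), so clean pages can be
	 * skipped instead of flushing the whole carveout buffer.
	 */
	for_each_sg(table->sgl, sg, table->nents, i) {
		if (!test_bit(i, dirty))
			continue;
		/* Assumes phys address == dma address for carveout memory. */
		dma_sync_single_for_device(dev, sg_phys(sg), PAGE_SIZE, dir);
		clear_bit(i, dirty);
	}
}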
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 13f6e8d..60e97e5 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -88,25 +88,36 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
 	struct sg_table *table;
-	int ret;
+	struct scatterlist *sg;
+	int ret, i;
+	int nents = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page *page = phys_to_page(buffer->priv_phys);
 
 	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
 		return ERR_PTR(-ENOMEM);
-	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+
+	ret = sg_alloc_table(table, nents, GFP_KERNEL);
 	if (ret) {
 		kfree(table);
 		return ERR_PTR(ret);
 	}
-	sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
-		    0);
+
+	sg = table->sgl;
+	for (i = 0; i < nents; i++) {
+		sg_set_page(sg, page + i, PAGE_SIZE, 0);
+		sg = sg_next(sg);
+	}
+
 	return table;
 }
 
 void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	sg_free_table(buffer->sg_table);
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
 }
 
 void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
@@ -157,7 +168,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 	if (!carveout_heap)
 		return ERR_PTR(-ENOMEM);
 
-	carveout_heap->pool = gen_pool_create(12, -1);
+	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
 	if (!carveout_heap->pool) {
 		kfree(carveout_heap);
 		return ERR_PTR(-ENOMEM);
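Side note on the gen_pool_create() change (illustrative, not part of the
patch): using PAGE_SHIFT instead of the hard-coded order 12 keeps the pool's
minimum allocation order equal to the page size on configurations where
PAGE_SIZE is not 4K, so allocations stay page aligned (assuming the carveout
base passed to gen_pool_add() is itself page aligned) and the
nents = PAGE_ALIGN(size) / PAGE_SIZE split in map_dma remains valid. A tiny
sketch of that invariant, with a placeholder function name:

/* Illustrative sketch only -- the function name is a placeholder. */
#include <linux/genalloc.h>
#include <linux/kernel.h>

static unsigned long example_carveout_alloc(struct gen_pool *pool, size_t size)
{
	unsigned long paddr = gen_pool_alloc(pool, size);

	/*
	 * With gen_pool_create(PAGE_SHIFT, -1) and a page-aligned base,
	 * every allocation is page aligned, so splitting the buffer into
	 * whole pages in map_dma cannot straddle an allocation boundary.
	 */
	WARN_ON(paddr && (paddr & ~PAGE_MASK));
	return paddr;
}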