Add the dma-buf ops for this restricted heap. For restricted buffers, CPU cache ops and mmap are not allowed, so return -EPERM for them.
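For example, after allocating from this heap, userspace must not be able to touch the buffer with the CPU. A rough sketch of the expected behaviour (illustrative only, not part of this patch; the heap path "/dev/dma_heap/restricted" is an assumption, and error handling is omitted):

	#include <errno.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/dma-buf.h>
	#include <linux/dma-heap.h>

	int main(void)
	{
		int heap_fd = open("/dev/dma_heap/restricted", O_RDWR);
		struct dma_heap_allocation_data data = {
			.len = 4096,
			.fd_flags = O_RDWR | O_CLOEXEC,
		};

		ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);

		/* mmap is not allowed: expect MAP_FAILED with errno == EPERM */
		void *va = mmap(NULL, 4096, PROT_READ, MAP_SHARED, data.fd, 0);

		/* CPU cache maintenance is not allowed either: expect EPERM */
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
		};
		ioctl(data.fd, DMA_BUF_IOCTL_SYNC, &sync);

		return va == MAP_FAILED && errno == EPERM ? 0 : 1;
	}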
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
---
 drivers/dma-buf/heaps/restricted_heap.c | 103 ++++++++++++++++++++++++
 1 file changed, 103 insertions(+)
diff --git a/drivers/dma-buf/heaps/restricted_heap.c b/drivers/dma-buf/heaps/restricted_heap.c
index 8c266a0f6192..ec4c63d2112d 100644
--- a/drivers/dma-buf/heaps/restricted_heap.c
+++ b/drivers/dma-buf/heaps/restricted_heap.c
@@ -12,6 +12,10 @@
 
 #include "restricted_heap.h"
 
+struct restricted_heap_attachment {
+	struct sg_table		*table;
+};
+
 static int
 restricted_heap_memory_allocate(struct restricted_heap *heap, struct restricted_buffer *buf)
 {
@@ -45,6 +49,104 @@ restricted_heap_memory_free(struct restricted_heap *heap, struct restricted_buff
 	ops->memory_free(heap, buf);
 }
 
+static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+	struct restricted_buffer *restricted_buf = dmabuf->priv;
+	struct restricted_heap_attachment *a;
+	struct sg_table *table;
+	int ret;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table) {
+		ret = -ENOMEM;
+		goto err_free_attach;
+	}
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err_free_sgt;
+	sg_set_page(table->sgl, NULL, restricted_buf->size, 0);
+
+	a->table = table;
+	attachment->priv = a;
+
+	return 0;
+
+err_free_sgt:
+	kfree(table);
+err_free_attach:
+	kfree(a);
+	return ret;
+}
+
+static void restricted_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+	struct restricted_heap_attachment *a = attachment->priv;
+
+	sg_free_table(a->table);
+	kfree(a->table);
+	kfree(a);
+}
+
+static struct sg_table *
+restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direct)
+{
+	struct restricted_heap_attachment *a = attachment->priv;
+	struct sg_table *table = a->table;
+
+	return table;
+}
+
+static void
+restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+	struct restricted_heap_attachment *a = attachment->priv;
+
+	WARN_ON(a->table != table);
+}
+
+static int
+restricted_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+	return -EPERM;
+}
+
+static int
+restricted_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+	return -EPERM;
+}
+
+static int restricted_heap_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	return -EPERM;
+}
+
+static void restricted_heap_free(struct dma_buf *dmabuf)
+{
+	struct restricted_buffer *restricted_buf = dmabuf->priv;
+	struct restricted_heap *heap = dma_heap_get_drvdata(restricted_buf->heap);
+
+	restricted_heap_memory_free(heap, restricted_buf);
+	kfree(restricted_buf);
+}
+
+static const struct dma_buf_ops restricted_heap_buf_ops = {
+	.attach = restricted_heap_attach,
+	.detach = restricted_heap_detach,
+	.map_dma_buf = restricted_heap_map_dma_buf,
+	.unmap_dma_buf = restricted_heap_unmap_dma_buf,
+	.begin_cpu_access = restricted_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access = restricted_heap_dma_buf_end_cpu_access,
+	.mmap = restricted_heap_dma_buf_mmap,
+	.release = restricted_heap_free,
+};
+
 static struct dma_buf *
 restricted_heap_allocate(struct dma_heap *heap, unsigned long size, unsigned long fd_flags,
 			 unsigned long heap_flags)
@@ -66,6 +168,7 @@ restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
 	if (ret)
 		goto err_free_buf;
 	exp_info.exp_name = dma_heap_get_name(heap);
+	exp_info.ops = &restricted_heap_buf_ops;
 	exp_info.size = restricted_buf->size;
 	exp_info.flags = fd_flags;
 	exp_info.priv = restricted_buf;
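For reference, a kernel-side importer would reach these ops through the usual dma-buf interfaces, roughly as below (illustrative sketch only, not part of this patch; "fd" and "dev" are assumed to come from the importing driver, and error handling is omitted):

	struct dma_buf *dmabuf = dma_buf_get(fd);
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);		/* restricted_heap_attach() */
	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
							/* restricted_heap_map_dma_buf() */

	/* hand the buffer to the device; no CPU access in between */

	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
							/* restricted_heap_unmap_dma_buf() */
	dma_buf_detach(dmabuf, attach);			/* restricted_heap_detach() */
	dma_buf_put(dmabuf);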