From: Benjamin Gaignard <benjamin.gaignard@linaro.org>
DRM already offers helpers to use CMA for dumb buffers. This patch adds helpers to export/import GEM CMA objects and allows them to be mmapped from userland. The goal is to make the following sequence work: create_dumb, get an fd from the buffer handle, and then use that fd (possibly in another process, which may not even know it comes from DRM) to mmap the buffer.
drm_gem_cma_prime_export() adds O_RDWR to the flags to make sure the memory can be mmapped later with the PROT_WRITE flag.
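For reference, a minimal userspace sketch of that sequence could look like the following. This is purely illustrative: error handling is omitted, the device node path and buffer dimensions are arbitrary assumptions, and the header paths may differ depending on the libdrm installation.

/*
 * Illustrative sketch only: create_dumb -> PRIME fd -> mmap from the fd.
 * "/dev/dri/card0" and the 640x480/32bpp buffer are arbitrary assumptions.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	int drm_fd = open("/dev/dri/card0", O_RDWR);

	/* 1) allocate a dumb buffer (backed by CMA with these helpers) */
	struct drm_mode_create_dumb create = {
		.width = 640,
		.height = 480,
		.bpp = 32,
	};
	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);

	/* 2) get a dma-buf fd from the GEM handle */
	struct drm_prime_handle prime = {
		.handle = create.handle,
		.flags = DRM_CLOEXEC,
	};
	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);

	/*
	 * 3) mmap through the dma-buf fd, possibly in another process that
	 * only received the fd. PROT_WRITE requires the fd to be O_RDWR,
	 * which is why the exporter adds O_RDWR in this patch.
	 */
	void *map = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, prime.fd, 0);
	if (map != MAP_FAILED)
		memset(map, 0, create.size);

	munmap(map, create.size);
	close(prime.fd);
	close(drm_fd);
	return 0;
}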
Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
---
 drivers/gpu/drm/drm_gem_cma_helper.c | 192 ++++++++++++++++++++++++++++++++++
 include/drm/drm_gem_cma_helper.h     |   6 ++
 2 files changed, 198 insertions(+)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bad85bb..936c337 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -21,12 +21,204 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/export.h>
+#include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/drm_gem_cma_helper.h>
 
+struct drm_gem_cma_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+	bool is_mapped;
+};
+
+static int drm_gem_cma_attach_dma_buf(struct dma_buf *dmabuf,
+				       struct device *dev,
+				       struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *drm_gem_cma_attach;
+
+	drm_gem_cma_attach = kzalloc(sizeof(*drm_gem_cma_attach), GFP_KERNEL);
+	if (!drm_gem_cma_attach)
+		return -ENOMEM;
+
+	drm_gem_cma_attach->dir = DMA_NONE;
+	attach->priv = drm_gem_cma_attach;
+
+	return 0;
+}
+
+static void drm_gem_cma_detach_dma_buf(struct dma_buf *dmabuf,
+				       struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *drm_gem_cma_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (!drm_gem_cma_attach)
+		return;
+
+	sgt = &drm_gem_cma_attach->sgt;
+
+	if (drm_gem_cma_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+			     drm_gem_cma_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(drm_gem_cma_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *
+drm_gem_cma_map_dma_buf(struct dma_buf_attachment *attach,
+			enum dma_data_direction dir)
+{
+	struct drm_gem_cma_dmabuf_attachment *drm_gem_cma_attach = attach->priv;
+	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
+	struct drm_device *dev = cma_obj->base.dev;
+	struct sg_table *sgt = NULL;
+	int nents, ret;
+
+	/* just return current sgt if already requested. */
+	if (drm_gem_cma_attach->dir == dir && drm_gem_cma_attach->is_mapped)
+		return &drm_gem_cma_attach->sgt;
+
+	sgt = &drm_gem_cma_attach->sgt;
+
+	ret = dma_common_get_sgtable(dev->dev, sgt,
+				     cma_obj->vaddr, cma_obj->paddr,
+				     cma_obj->base.size);
+	if (ret) {
+		DRM_ERROR("failed to get sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dir != DMA_NONE) {
+		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+		if (!nents) {
+			DRM_ERROR("failed to map sgl with iommu.\n");
+			sg_free_table(sgt);
+			sgt = ERR_PTR(-EIO);
+			goto err_unlock;
+		}
+	}
+
+	drm_gem_cma_attach->is_mapped = true;
+	drm_gem_cma_attach->dir = dir;
+	attach->priv = drm_gem_cma_attach;
+
+err_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return sgt;
+}
+
+static void drm_gem_cma_unmap_dma_buf(struct dma_buf_attachment *attach,
+				      struct sg_table *sgt,
+				      enum dma_data_direction dir)
+{
+	/* Nothing to do */
+}
+
+static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	/*
+	 * drm_gem_cma_dmabuf_release() call means that file object's
+	 * f_count is 0 and it calls drm_gem_object_handle_unreference()
+	 * to drop the references that these values had been increased
+	 * at drm_prime_handle_to_fd()
+	 */
+	if (cma_obj->base.export_dma_buf == dmabuf) {
+		cma_obj->base.export_dma_buf = NULL;
+
+		/*
+		 * drop this gem object refcount to release allocated buffer
+		 * and resources.
+		 */
+		drm_gem_object_unreference_unlocked(&cma_obj->base);
+	}
+}
+
+static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+					    unsigned long page_num)
+{
+	struct drm_gem_cma_object *cma_obj = dma_buf->priv;
+
+	return cma_obj->paddr;
+}
+
+static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
+				   struct vm_area_struct *vma)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+	struct drm_device *dev = cma_obj->base.dev;
+	int ret;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	ret = dma_mmap_coherent(dev->dev, vma,
+				cma_obj->vaddr, cma_obj->paddr,
+				cma_obj->base.size);
+	if (ret) {
+		DRM_DEBUG_PRIME("Remapping memory failed, error: %d\n", ret);
+		return ret;
+	}
+
+	DRM_DEBUG_PRIME("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
+			__func__, (unsigned long)cma_obj->paddr, vma->vm_start,
+			cma_obj->base.size);
+
+	return ret;
+}
+
+static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
+	.attach		= drm_gem_cma_attach_dma_buf,
+	.detach		= drm_gem_cma_detach_dma_buf,
+	.map_dma_buf	= drm_gem_cma_map_dma_buf,
+	.unmap_dma_buf	= drm_gem_cma_unmap_dma_buf,
+	.kmap		= drm_gem_cma_dmabuf_kmap_atomic,
+	.kmap_atomic	= drm_gem_cma_dmabuf_kmap_atomic,
+	.mmap		= drm_gem_cma_dmabuf_mmap,
+	.release	= drm_gem_cma_dmabuf_release,
+};
+
+struct dma_buf *drm_gem_cma_prime_export(struct drm_device *drm_dev,
+					 struct drm_gem_object *obj, int flags)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	flags |= O_RDWR;
+	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
+			      cma_obj->base.size, flags);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_export);
+
+struct drm_gem_object *drm_gem_cma_prime_import(struct drm_device *drm_dev,
+						struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj;
+
+	if (dmabuf->ops == &drm_gem_cma_dmabuf_ops) {
+		struct drm_gem_object *obj;
+
+		cma_obj = dmabuf->priv;
+		obj = &cma_obj->base;
+
+		/* is it from our device? */
+		if (obj->dev == drm_dev) {
+			/*
+			 * Importing dmabuf exported from out own gem increases
+			 * refcount on gem itself instead of f_count of dmabuf.
+			 */
+			drm_gem_object_reference(obj);
+			return obj;
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import);
+
 static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 63397ce..8ce21df 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -45,4 +45,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif
+struct dma_buf *drm_gem_cma_prime_export(struct drm_device *drm_dev,
+					 struct drm_gem_object *obj, int flags);
+
+struct drm_gem_object *drm_gem_cma_prime_import(struct drm_device *drm_dev,
+						struct dma_buf *dmabuf);
+
 #endif /* __DRM_GEM_CMA_HELPER_H__ */
On Fri, Oct 18, 2013 at 11:00:57AM +0200, benjamin.gaignard@linaro.org wrote:
> From: Benjamin Gaignard <benjamin.gaignard@linaro.org>
>
> DRM already offers helpers to use CMA for dumb buffers. This patch adds helpers to export/import GEM CMA objects and allows them to be mmapped from userland. The goal is to make the following sequence work: create_dumb, get an fd from the buffer handle, and then use that fd (possibly in another process, which may not even know it comes from DRM) to mmap the buffer.
>
> drm_gem_cma_prime_export() adds O_RDWR to the flags to make sure the memory can be mmapped later with the PROT_WRITE flag.
>
> Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
[snip]
> +struct dma_buf *drm_gem_cma_prime_export(struct drm_device *drm_dev,
> +					 struct drm_gem_object *obj, int flags)
> +{
> +	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
> +
> +	flags |= O_RDWR;
This here looks funny ... I think either we need to add more flags to the prime dma-buf exporting to also pass this flag through. Or we should just generally set this in dma_buf_export. Doing this as an exporter-specific hack feels wrong.

-Daniel
> +	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
> +			      cma_obj->base.size, flags);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_export);
I realize that this patch (written for 3.10) is obsolete because something similar has been pushed in 3.12.
Anyway, the question of how/where to set O_RDWR in dma_buf_export is still valid. Your advice is welcome.
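For discussion, here is a very rough and untested sketch of what "passing the flag through PRIME" could look like, so that userspace opts in and the core (not each exporter) decides on O_RDWR. The DRM_RDWR name is invented purely for illustration; today only DRM_CLOEXEC is accepted by the ioctl.

/*
 * Rough, untested sketch only: accept a writable-dma-buf request in the
 * PRIME ioctl flags instead of every exporter forcing O_RDWR itself.
 * DRM_RDWR is a made-up name here, not an existing uapi flag.
 */
#define DRM_RDWR O_RDWR

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* now also accept DRM_RDWR from userspace */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	/* the core passes the flag on; exporters no longer add O_RDWR */
	return dev->driver->prime_handle_to_fd(dev, file_priv, args->handle,
					       args->flags, &args->fd);
}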
Benjamin
2013/10/19 Daniel Vetter <daniel@ffwll.ch>:
> On Fri, Oct 18, 2013 at 11:00:57AM +0200, benjamin.gaignard@linaro.org wrote:
> > From: Benjamin Gaignard <benjamin.gaignard@linaro.org>
> >
> > DRM already offers helpers to use CMA for dumb buffers. This patch adds helpers to export/import GEM CMA objects and allows them to be mmapped from userland. The goal is to make the following sequence work: create_dumb, get an fd from the buffer handle, and then use that fd (possibly in another process, which may not even know it comes from DRM) to mmap the buffer.
> >
> > drm_gem_cma_prime_export() adds O_RDWR to the flags to make sure the memory can be mmapped later with the PROT_WRITE flag.
> >
> > Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
>
> [snip]
> > +struct dma_buf *drm_gem_cma_prime_export(struct drm_device *drm_dev,
> > +					 struct drm_gem_object *obj, int flags)
> > +{
> > +	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
> > +
> > +	flags |= O_RDWR;
>
> This here looks funny ... I think either we need to add more flags to the prime dma-buf exporting to also pass this flag through. Or we should just generally set this in dma_buf_export. Doing this as an exporter-specific hack feels wrong.
> -Daniel
>
> > +	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
> > +			      cma_obj->base.size, flags);
> > +}
> > +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_export);
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch