From: Prathyush K <prathyush.k@samsung.com>
With this set of patches, the Exynos5 DRM module will be able to allocate, export, and map non-contiguous memory for GEM objects using the dma-mapping IOMMU framework.
These patches are based on the following references:
1> Exynos 5 SYSMMU driver by Kyongho Cho
   http://lists.infradead.org/pipermail/linux-arm-kernel/2011-December/078636.h...
2> DMA-MAPPING IOMMU framework by Marek Szyprowski
   http://lists.infradead.org/pipermail/linux-arm-kernel/2011-June/053636.html
3> DRM-PRIME by Dave Airlie
   http://patches.linaro.org/7480/
4> DRM Exynos DMABUF module by Inki Dae
   http://git.infradead.org/users/kmpark/linux-2.6-samsung/commit/d59d3e521d8a9d27e7b329f89f24a8473f32f50c/
5> Support for dma_get_pages by Tomasz Stanislawski
   http://patchwork.linuxtv.org/patch/9639/
The updated GEM framework by Inki Dae is not taken into account in this patch set.
An IOMMU mapping is created for the exynos-drm device at initialization. Allocation of GEM objects happens through dma_alloc_writecombine, which in turn calls arm_iommu_alloc_attrs. This allocates a non-contiguous set of pages, allocates an IOVA, and maps all the pages.
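For illustration, a minimal sketch of this allocation path as a driver might invoke it; the helper name and out-parameters here are our assumptions for the example, not the actual exynos-drm code:

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/gfp.h>

/*
 * Illustrative sketch only: with the dma-mapping IOMMU glue in place,
 * dma_alloc_writecombine() hands back a kernel virtual address plus a
 * device address, while the backing pages themselves may be physically
 * non-contiguous.
 */
static int example_alloc_gem_buffer(struct device *dev, size_t size,
				    void **kvaddr, dma_addr_t *dma_addr)
{
	*kvaddr = dma_alloc_writecombine(dev, size, dma_addr, GFP_KERNEL);
	if (!*kvaddr)
		return -ENOMEM;

	/* with an IOMMU attached, *dma_addr is an IOVA, not a physical address */
	return 0;
}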
The following changes are made to support non-contiguous memory for GEM objects:
[PATCH 1/4]: drm/exynos: DMABUF: Added support for exporting non-contig buffers. This patch creates and maps an SGT in map_dma_buf by retrieving the pages with dma_get_pages.
[PATCH 2/4]: drm/exynos: Mapping of gem objects uses dma_mmap_writecombine. This patch maps non-contiguous GEM objects to user space by calling dma_mmap_writecombine.
[PATCH 3/4]: drm/Exynos: Added 'disable' function to Exynos drm crtc module. This patch is required for safe release of DRM. If an FB of a CRTC is released, the driver tries to disable the CRTC if the CRTC supports that. This patch adds the Exynos DRM CRTC disable functionality.
[PATCH 4/4]: drm: Releasing FBs before releasing GEM objects during drm_release. This patch is required for safe release of DRM. During DRM release, all the FBs and GEM objects are released. A GEM object that is in use as an FB must not be freed before the FB is released. This patch modifies the DRM release function to first release the FBs and then release the GEMs.
 drivers/gpu/drm/drm_fops.c                 |    6 +-
 drivers/gpu/drm/exynos/exynos_drm_crtc.c   |   11 +++
 drivers/gpu/drm/exynos/exynos_drm_dmabuf.c |   98 +++++++++++++++++++++++-----
 drivers/gpu/drm/exynos/exynos_drm_gem.c    |   70 +++++++++-----------
 4 files changed, 128 insertions(+), 57 deletions(-)
With this change, the exynos drm dmabuf module can export and import dmabufs of GEM objects with non-contiguous memory.

The exynos_map_dmabuf function creates an SGT for a non-contiguous buffer by calling dma_get_pages to retrieve the allocated pages, and then maps the SGT into the caller's address space.
Signed-off-by: Prathyush K <prathyush.k@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_dmabuf.c |   98 +++++++++++++++++++++++-----
 1 files changed, 81 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index cbb6ad4..54b88bd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -56,6 +56,59 @@ static void exynos_dmabuf_detach(struct dma_buf *dmabuf,
 	dma_buf_put(dmabuf);
 }
 
+
+static struct sg_table *drm_dc_pages_to_sgt(struct page **pages,
+	unsigned long n_pages, size_t offset, size_t offset2, dma_addr_t daddr)
+{
+	struct sg_table *sgt;
+	struct scatterlist *s;
+	int i, j, cur_page, chunks, ret;
+
+	sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	/* compute number of chunks */
+	chunks = 1;
+	for (i = 1; i < n_pages; ++i)
+		if (pages[i] != pages[i - 1] + 1)
+			++chunks;
+
+	ret = sg_alloc_table(sgt, chunks, GFP_KERNEL);
+	if (ret) {
+		kfree(sgt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* merging chunks and putting them into the scatterlist */
+	cur_page = 0;
+	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+		size_t size = PAGE_SIZE;
+
+		for (j = cur_page + 1; j < n_pages; ++j) {
+			if (pages[j] != pages[j - 1] + 1)
+				break;
+			size += PAGE_SIZE;
+		}
+
+		/* cut offset if chunk starts at the first page */
+		if (cur_page == 0)
+			size -= offset;
+		/* cut offset2 if chunk ends at the last page */
+		if (j == n_pages)
+			size -= offset2;
+
+		sg_set_page(s, pages[cur_page], size, offset);
+		s->dma_address = daddr;
+		daddr += size;
+		offset = 0;
+		cur_page = j;
+	}
+
+	return sgt;
+}
+
+
 static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
 					enum dma_data_direction direction)
 {
@@ -64,6 +117,8 @@ static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
 	struct exynos_drm_gem_buf *buffer;
 	struct sg_table *sgt;
 	int ret;
+	int size, n_pages;
+	struct page **pages = NULL;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -71,27 +126,37 @@ static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
 
 	buffer = exynos_gem_obj->buffer;
 
-	/* TODO. consider physically non-continuous memory with IOMMU. */
+	size = buffer->size;
+	n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt) {
-		DRM_DEBUG_KMS("failed to allocate sg table.\n");
-		return ERR_PTR(-ENOMEM);
+	pages = kmalloc(n_pages * sizeof pages[0], GFP_KERNEL);
+	if (!pages) {
+		DRM_DEBUG_KMS("failed to alloc page table\n");
+		return NULL;
 	}
 
-	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	ret = dma_get_pages(attach->dev, buffer->kvaddr,
+			buffer->dma_addr, pages, n_pages);
 	if (ret < 0) {
-		DRM_DEBUG_KMS("failed to allocate scatter list.\n");
-		kfree(sgt);
-		sgt = NULL;
-		return ERR_PTR(-ENOMEM);
+		DRM_DEBUG_KMS("failed to get buffer pages from DMA API\n");
+		return NULL;
 	}
+	if (ret != n_pages) {
+		DRM_DEBUG_KMS("failed to get all pages from DMA API\n");
+		return NULL;
+	}
+
+	sgt = drm_dc_pages_to_sgt(pages, n_pages, 0, 0, buffer->dma_addr);
+	if (IS_ERR(sgt)) {
+		DRM_DEBUG_KMS("failed to prepare sg table\n");
+		return NULL;
+	}
+
+	sgt->nents = dma_map_sg(attach->dev, sgt->sgl,
+				sgt->orig_nents, DMA_BIDIRECTIONAL);
 
-	sg_init_table(sgt->sgl, 1);
-	sg_dma_len(sgt->sgl) = buffer->size;
-	sg_set_page(sgt->sgl, pfn_to_page(PFN_DOWN(buffer->dma_addr)),
-			buffer->size, 0);
-	sg_dma_address(sgt->sgl) = buffer->dma_addr;
+	/* pages are no longer needed */
+	kfree(pages);
 
 	/*
 	 * increase reference count of this buffer.
@@ -303,8 +368,6 @@ int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
 	if (ret < 0)
 		goto fail_handle;
 
-	/* consider physically non-continuous memory with IOMMU. */
-	buffer->dma_addr = sg_dma_address(sgt->sgl);
 	buffer->size = sg_dma_len(sgt->sgl);
 
@@ -316,6 +379,7 @@ int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
 	atomic_set(&buffer->shared_refcount, 1);
 
 	exynos_gem_obj->base.import_attach = attach;
+	exynos_gem_obj->buffer = buffer;
 
 	ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
 			dmabuf, *handle);
Hi,
I saw your description, but please generate the patches against the latest code next time.
On 4/14/12, Prathyush <prathyush.k@samsung.com> wrote:
> With this change, the exynos drm dmabuf module can export and import
> dmabuf of gem objects with non-continuous memory.
>
> @@ -56,6 +56,59 @@ static void exynos_dmabuf_detach(struct dma_buf *dmabuf,
>  	dma_buf_put(dmabuf);
>  }
> +

Useless newline.

> +static struct sg_table *drm_dc_pages_to_sgt(struct page **pages,
> +	unsigned long n_pages, size_t offset, size_t offset2, dma_addr_t daddr)
> +{
[...]
> +	return sgt;
> +}

This function is almost the same as Tomasz's one in V4L2. Tomasz, can you make it a common helper function in a common place?

> +
> +

Useless newline too.

>  static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
>  					enum dma_data_direction direction)
[...]
> +	pages = kmalloc(n_pages * sizeof pages[0], GFP_KERNEL);

kcalloc?

Thank you,
Kyungmin Park
Dear Mr. Park,
I will prepare and post a patch to lib/scatterlist.c that adds support for initializing an sg_table from a page array.
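Such a helper, modeled on the drm_dc_pages_to_sgt() in the patch above, could look roughly like the sketch below; the name and signature are illustrative guesses, not the final API:

#include <linux/scatterlist.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only: build an sg_table from an array of pages,
 * merging runs of physically contiguous pages into single entries.
 * Name and signature are placeholders for whatever the real patch adds.
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	unsigned int chunks = 1, cur = 0, i;
	struct scatterlist *s;
	int ret;

	/* count runs of physically contiguous pages */
	for (i = 1; i < n_pages; ++i)
		if (pages[i] != pages[i - 1] + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (ret)
		return ret;

	/* one scatterlist entry per contiguous run */
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		for (j = cur + 1; j < n_pages; ++j)
			if (pages[j] != pages[j - 1] + 1)
				break;

		/* trim the leading offset and, on the last run, the tail */
		chunk_size = ((j - cur) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur = j;
	}

	return 0;
}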
Yours sincerely,
Tomasz Stanislawski
Hi Prathyush.
First of all, thanks for your patch, but your patch set isn't based on the latest exynos dmabuf code. We have already updated dmabuf support for exynos drm, and it also covers non-contiguous memory regions. You can refer to the git repository below:
http://git.infradead.org/users/kmpark/linux-2.6-samsung/shortlog/refs/heads/exynos-drm-prime
This repository has already been tested by me and Tomasz. For that, please refer to the link below:
http://www.spinics.net/lists/linux-media/msg46292.html
Last week we posted an exynos-drm-fixes update, and it was merged into drm-fixes. So we are just waiting for a drm-next update before posting the latest exynos-drm-prime. Sorry, but please re-sync your patch set with Dave's latest drm-next after the latest exynos-drm-prime is posted; until then, your patch set can't be reviewed.
Thanks,
Inki Dae.
-----Original Message-----
From: Prathyush [mailto:prathyush.k@samsung.com]
Sent: Saturday, April 14, 2012 8:52 PM
To: dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org
Cc: inki.dae@samsung.com; subash.rp@samsung.com; prashanth.g@samsung.com; sunilm@samsung.com; prathyush.k@samsung.com
Subject: [PATCH 1/4] [RFC] drm/exynos: DMABUF: Added support for exporting non-contig buffers

[...]
GEM objects get mapped to user space in two ways: DIRECT and INDIRECT mapping. DIRECT mapping is done through an ioctl, which maps all the pages to user space by calling remap_pfn_range. INDIRECT mapping is done by calling mmap; the actual mapping happens when a page fault is generated and is handled by the exynos_drm_gem_fault function, which maps the required page. Both methods assume contiguous memory.

With this change, the mapping is done by dma_mmap_writecombine, which supports mapping non-contiguous memory to user space.

For DIRECT mapping, this works much like the previous approach. But when a page fault occurs, dma_mmap_writecombine maps all the pages, not just one page.
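A minimal sketch of such a whole-buffer mapping; the helper name is ours, and kvaddr/dma_addr are assumed to come from a prior dma_alloc_writecombine of at least the VMA's size, so this is illustrative rather than the exynos code itself:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only: unlike a per-page fault handler,
 * dma_mmap_writecombine() maps the entire allocation into the VMA in
 * one call, so physically non-contiguous (IOMMU-backed) buffers are
 * handled the same way as contiguous ones.
 */
static int example_mmap_buffer(struct device *dev, struct vm_area_struct *vma,
			       void *kvaddr, dma_addr_t dma_addr)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_pgoff = 0;

	return dma_mmap_writecombine(dev, vma, kvaddr, dma_addr,
				     vma->vm_end - vma->vm_start);
}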
Signed-off-by: Prathyush K <prathyush.k@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_gem.c |   70 ++++++++++++++----------------
 1 files changed, 33 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 807143e..a57a83a 100755
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -200,40 +200,27 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 {
 	struct drm_gem_object *obj = filp->private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buffer;
-	unsigned long pfn, vm_size;
-
-	DRM_DEBUG_KMS("%s\n", __FILE__);
+	void *kva;
+	dma_addr_t dma_address;
+	unsigned long ret;
 
-	vma->vm_flags |= (VM_IO | VM_RESERVED);
+	kva = exynos_gem_obj->buffer->kvaddr;
 
-	/* in case of direct mapping, always having non-cachable attribute */
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	vma->vm_file = filp;
-
-	vm_size = vma->vm_end - vma->vm_start;
-	/*
-	 * a buffer contains information to physically continuous memory
-	 * allocated by user request or at framebuffer creation.
-	 */
-	buffer = exynos_gem_obj->buffer;
-
-	/* check if user-requested size is valid. */
-	if (vm_size > buffer->size)
-		return -EINVAL;
+	if (kva == NULL) {
+		DRM_ERROR("No KVA Found\n");
+		return -EAGAIN;
+	}
 
-	/*
-	 * get page frame number to physical memory to be mapped
-	 * to user space.
-	 */
-	pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
+	dma_address = exynos_gem_obj->buffer->dma_addr;
+	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+	vma->vm_pgoff = 0;
 
-	DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+	ret = dma_mmap_writecombine(obj->dev->dev, vma, kva,
+			dma_address, vma->vm_end - vma->vm_start);
 
-	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-				vma->vm_page_prot)) {
-		DRM_ERROR("failed to remap pfn range.\n");
-		return -EAGAIN;
+	if (ret) {
+		DRM_ERROR("Remapping memory failed, error: %ld\n", ret);
+		return ret;
 	}
 
 	return 0;
@@ -433,19 +420,29 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 	struct drm_device *dev = obj->dev;
-	unsigned long pfn;
-	pgoff_t page_offset;
+	void *kva;
+	dma_addr_t dma_address;
 	int ret;
 
-	page_offset = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	kva = exynos_gem_obj->buffer->kvaddr;
+
+	if (kva == NULL) {
+		DRM_ERROR("No KVA Found\n");
+		return -EAGAIN;
+	}
 
 	mutex_lock(&dev->struct_mutex);
 
-	pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-			PAGE_SHIFT) + page_offset;
-
-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+	dma_address = exynos_gem_obj->buffer->dma_addr;
+	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+	vma->vm_pgoff = 0;
+
+	ret = dma_mmap_writecombine(obj->dev->dev, vma, kva,
+			dma_address, vma->vm_end - vma->vm_start);
+
+	if (ret)
+		DRM_ERROR("Remapping memory failed, error: %d\n", ret);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -457,7 +454,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	int ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
-	/* set vm_area_struct. */
 	ret = drm_gem_mmap(filp, vma);
 	if (ret < 0) {
Hi,
This patch set is also based on an old version, and the feature below has already been merged into mainline. Prathyush, from next time please base patch sets on the latest drm-next for new features and on drm-fixes for bug fixes, and also include me as maintainer.
Thanks,
Inki Dae.
-----Original Message-----
From: Prathyush [mailto:prathyush.k@samsung.com]
Sent: Saturday, April 14, 2012 8:52 PM
To: dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org
Cc: inki.dae@samsung.com; subash.rp@samsung.com; prashanth.g@samsung.com; sunilm@samsung.com; prathyush.k@samsung.com
Subject: [PATCH 2/4] [RFC] drm/exynos: Mapping of gem objects uses dma_mmap_writecombine

[...]
While freeing an FB, if the FB is being used by a CRTC, the DRM driver tries to disable the CRTC. Currently there is no disable function provided by the exynos drm crtc module, so the driver tries to suspend the CRTC (by calling dpms), which only works if runtime PM is enabled.
Signed-off-by: Prathyush K <prathyush.k@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_crtc.c |   11 +++++++++++
 1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index e3861ac..eb1e553 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -192,6 +192,16 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_overlay *overlay = &exynos_crtc->overlay;
+	int win = overlay->zpos;
+
+	exynos_drm_fn_encoder(crtc, &win,
+			exynos_drm_encoder_crtc_disable);
+}
+
 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -278,6 +288,7 @@ static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
 	.dpms		= exynos_drm_crtc_dpms,
+	.disable	= exynos_drm_crtc_disable,
 	.prepare	= exynos_drm_crtc_prepare,
 	.commit		= exynos_drm_crtc_commit,
 	.mode_fixup	= exynos_drm_crtc_mode_fixup,
-----Original Message-----
From: Prathyush [mailto:prathyush.k@samsung.com]
Sent: Saturday, April 14, 2012 8:52 PM
To: dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org
Cc: inki.dae@samsung.com; subash.rp@samsung.com; prashanth.g@samsung.com; sunilm@samsung.com; prathyush.k@samsung.com
Subject: [PATCH 3/4] [RFC] drm/Exynos: Added 'disable' function to Exynos drm crtc module.

[...]
The dpms callback does the same thing; please see the drm_crtc.c file, where the dpms callback is called if disable is NULL. Your patch makes our dpms callback unused. Also, exynos_drm_encoder_crtc_disable is called by the exynos_drm_plane module for each overlay.
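For context, the fallback described here looks roughly like the sketch below; this is a paraphrase of the DRM core behavior of that era, not a verbatim quote, and the function name is illustrative:

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

/*
 * Paraphrased sketch: when a CRTC has to be shut off, the core prefers
 * the helper's ->disable() and falls back to ->dpms(DRM_MODE_DPMS_OFF)
 * only when ->disable() is NULL. Providing ->disable() therefore
 * bypasses the driver's dpms callback.
 */
static void example_shut_off_crtc(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	if (crtc_funcs->disable)
		(*crtc_funcs->disable)(crtc);
	else
		(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
}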
Thanks.
During DRM release, all the FBs and GEM objects are released. If a GEM object is being used as an FB and is set to a CRTC, it must not be freed before the framebuffer is released.

If the FBs are released first, the CRTC using the FB is disabled first, so the GEM object can then be freed safely. The CRTC will be enabled again when the driver restores fbdev mode.
Signed-off-by: Prathyush K <prathyush.k@samsung.com>
---
 drivers/gpu/drm/drm_fops.c |    6 +++---
 1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 6263b01..f6525ef 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -498,12 +498,12 @@ int drm_release(struct inode *inode, struct file *filp)
 
 	drm_events_release(file_priv);
 
-	if (dev->driver->driver_features & DRIVER_GEM)
-		drm_gem_release(dev, file_priv);
-
 	if (dev->driver->driver_features & DRIVER_MODESET)
 		drm_fb_release(file_priv);
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
+
 	mutex_lock(&dev->ctxlist_mutex);
 	if (!list_empty(&dev->ctxlist)) {
 		struct drm_ctx_list *pos, *n;