Well,
V2 time! It was hinted that I should look at ttm_execbuf_util, and it does indeed contain some nice code.
My goal this time was to extend dma-buf in a generic way. Some elements from v1 remain,
most notably a dma-buf used for syncing, but dma-buf syncing is now expected to
work in a very specific way, slightly stricter than in v1.
Instead of each buffer having its own sync dma-buf, there is now 1 per command submission.
This submission hasn't been run-time tested yet, but I expect the api to be used like this:
INIT_LIST_HEAD(&head);
list_add_tail(&validate1->head, &head);
list_add_tail(&validate2->head, &head);
list_add_tail(&validate3->head, &head);
r = dmabufmgr_eu_reserve_buffers(&head);
if (r)
	return r;
// add waits on cpu or gpu
list_for_each_entry(validate, &head, head) {
	if (!validate->sync_buf)
		continue;
	// Check attachments to see if we already imported sync_buf
	// somewhere; if not, attach to it.
	// Wait until (s32)(cur_seq - validate->sync_val) >= 0, either on
	// the cpu or as a command submitted to the gpu.
	// sync_buf is itself a dma-buf, so this should be trivial.
	// TODO: sync_buf should NEVER be validated; add is_sync_buf to dma_buf?
	// If this step fails: dmabufmgr_eu_backoff_reservation()
	// else:
	//	dmabufmgr_eu_fence_buffer_objects(our_own_sync_buf,
	//		hwchannel * max(minhwalign, 4), ++counter[hwchannel],
	//		&head);
	// XXX: Do we still require a minimum alignment? I set 16 for nouveau,
	// but it is no longer needed in this design, since it only matters
	// for writes, for which nouveau already controls the offset.
}
// Some time after the execbuffer has executed -- not necessarily right
// away, but before our own counter is in danger of wrapping around --
// grab dmabufmgr.lru_lock and clean up by unreffing sync_buf when
// sync_buf == ownbuf, sync_ofs == ownofs and sync_val == saved_counter.
// In the meantime someone else, or even we ourselves, might have reserved
// this dma_buf again, which is why all those checks are needed before
// unreffing. A sketch of this cleanup step follows below.
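To make that cleanup step concrete, here is a minimal, untested sketch of
what I have in mind; driver_release_own_fence and its parameters are
illustrative names of mine, not part of the patch below:

static void driver_release_own_fence(struct dma_buf *bo,
				     struct dma_buf *own_sync_buf,
				     u32 own_ofs, u32 saved_counter)
{
	struct dma_buf *stale = NULL;

	spin_lock(&dmabufmgr.lru_lock);
	/* Only drop the fence if it is still ours; another submission
	 * may have re-fenced this buffer in the meantime. */
	if (bo->sync_buf == own_sync_buf &&
	    bo->sync_ofs == own_ofs &&
	    bo->sync_val == saved_counter) {
		stale = bo->sync_buf;
		bo->sync_buf = NULL;
	}
	spin_unlock(&dmabufmgr.lru_lock);

	/* dma_buf_put() may sleep, so call it outside the spinlock. */
	if (stale)
		dma_buf_put(stale);
}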
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5aa2d70..86e7598 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o dma-buf-mgr.o dma-buf-mgr-eu.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/dma-buf-mgr-eu.c b/drivers/base/dma-buf-mgr-eu.c
new file mode 100644
index 0000000..27ebc68
--- /dev/null
+++ b/drivers/base/dma-buf-mgr-eu.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Canonical Ltd
+ *
+ * Based on ttm_bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/dma-buf-mgr.h>
+#include <linux/sched.h>
+#include <linux/export.h>
+
+static void dmabufmgr_eu_backoff_reservation_locked(struct list_head *list)
+{
+ struct dmabufmgr_validate *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct dma_buf *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ entry->reserved = false;
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ if (entry->sync_buf)
+ dma_buf_put(entry->sync_buf);
+ entry->sync_buf = NULL;
+ }
+}
+
+static int
+dmabufmgr_eu_wait_unreserved_locked(struct list_head *list,
+ struct dma_buf *bo)
+{
+ int ret;
+
+ spin_unlock(&dmabufmgr.lru_lock);
+ ret = dmabufmgr_bo_wait_unreserved(bo, true);
+ spin_lock(&dmabufmgr.lru_lock);
+ if (unlikely(ret != 0))
+ dmabufmgr_eu_backoff_reservation_locked(list);
+ return ret;
+}
+
+void
+dmabufmgr_eu_backoff_reservation(struct list_head *list)
+{
+ struct dmabufmgr_validate *entry;
+
+ if (list_empty(list))
+ return;
+
+ entry = list_first_entry(list, struct dmabufmgr_validate, head);
+ spin_lock(&dmabufmgr.lru_lock);
+ dmabufmgr_eu_backoff_reservation_locked(list);
+ spin_unlock(&dmabufmgr.lru_lock);
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_eu_backoff_reservation);
+
+int
+dmabufmgr_eu_reserve_buffers(struct list_head *list)
+{
+ struct dmabufmgr_validate *entry;
+ int ret;
+ u32 val_seq;
+
+ if (list_empty(list))
+ return 0;
+
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->sync_buf = NULL;
+ }
+
+retry:
+ spin_lock(&dmabufmgr.lru_lock);
+ val_seq = dmabufmgr.counter++;
+
+ list_for_each_entry(entry, list, head) {
+ struct dma_buf *bo = entry->bo;
+
+retry_this_bo:
+ ret = dmabufmgr_bo_reserve_locked(bo, true, true, true, val_seq);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = dmabufmgr_eu_wait_unreserved_locked(list, bo);
+ if (unlikely(ret != 0)) {
+ spin_unlock(&dmabufmgr.lru_lock);
+ return ret;
+ }
+ goto retry_this_bo;
+ case -EAGAIN:
+ dmabufmgr_eu_backoff_reservation_locked(list);
+ spin_unlock(&dmabufmgr.lru_lock);
+ ret = dmabufmgr_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ default:
+ dmabufmgr_eu_backoff_reservation_locked(list);
+ spin_unlock(&dmabufmgr.lru_lock);
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (bo->sync_buf)
+ get_dma_buf(bo->sync_buf);
+ entry->sync_buf = bo->sync_buf;
+ entry->sync_ofs = bo->sync_ofs;
+ entry->sync_val = bo->sync_val;
+ }
+ spin_unlock(&dmabufmgr.lru_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_eu_reserve_buffers);
+
+void
+dmabufmgr_eu_fence_buffer_objects(struct dma_buf *sync_buf, u32 ofs, u32 seq, struct list_head *list)
+{
+ struct dmabufmgr_validate *entry;
+ struct dma_buf *bo;
+
+ if (list_empty(list) || WARN_ON(!sync_buf))
+ return;
+
+ spin_lock(&dmabufmgr.lru_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+ dmabufmgr_bo_unreserve_locked(bo);
+ entry->reserved = false;
+ if (entry->sync_buf)
+ dma_buf_put(entry->sync_buf);
+ entry->sync_buf = NULL;
+
+ get_dma_buf(sync_buf);
+ bo->sync_buf = sync_buf;
+ bo->sync_ofs = ofs;
+ bo->sync_val = seq;
+ }
+
+ spin_unlock(&dmabufmgr.lru_lock);
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_eu_fence_buffer_objects);
diff --git a/drivers/base/dma-buf-mgr.c b/drivers/base/dma-buf-mgr.c
new file mode 100644
index 0000000..14756ff
--- /dev/null
+++ b/drivers/base/dma-buf-mgr.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2012 Canonical Ltd
+ *
+ * Based on ttm_bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/dma-buf-mgr.h>
+#include <linux/anon_inodes.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+
+/* Based on ttm_bo.c with vm_lock and fence_lock removed
+ * lru_lock takes care of fence_lock as well
+ */
+struct dmabufmgr dmabufmgr = {
+ .lru_lock = __SPIN_LOCK_UNLOCKED(dmabufmgr.lru_lock),
+ .counter = 1,
+};
+
+int
+dmabufmgr_bo_reserve_locked(struct dma_buf *bo,
+ bool interruptible, bool no_wait,
+ bool use_sequence, u32 sequence)
+{
+ int ret;
+
+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ /**
+ * Deadlock avoidance for multi-bo reserving.
+ */
+ if (use_sequence && bo->seq_valid) {
+ /**
+ * We've already reserved this one.
+ */
+ if (unlikely(sequence == bo->val_seq))
+ return -EDEADLK;
+ /**
+ * Already reserved by a thread that will not back
+ * off for us. We need to back off.
+ */
+ if (unlikely(sequence - bo->val_seq < (1 << 31)))
+ return -EAGAIN;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ spin_unlock(&dmabufmgr.lru_lock);
+ ret = dmabufmgr_bo_wait_unreserved(bo, interruptible);
+ spin_lock(&dmabufmgr.lru_lock);
+
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (use_sequence) {
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ if (unlikely((bo->val_seq - sequence < (1 << 31))
+ || !bo->seq_valid))
+ wake_up_all(&bo->event_queue);
+
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ } else {
+ bo->seq_valid = false;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_bo_reserve_locked);
+
+int
+dmabufmgr_bo_reserve(struct dma_buf *bo,
+ bool interruptible, bool no_wait,
+ bool use_sequence, u32 sequence)
+{
+ int ret;
+
+ spin_lock(&dmabufmgr.lru_lock);
+ ret = dmabufmgr_bo_reserve_locked(bo, interruptible, no_wait,
+ use_sequence, sequence);
+ spin_unlock(&dmabufmgr.lru_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_bo_reserve);
+
+int
+dmabufmgr_bo_wait_unreserved(struct dma_buf *bo, bool interruptible)
+{
+ if (interruptible) {
+ return wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->reserved) == 0);
+ } else {
+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_bo_wait_unreserved);
+
+void dmabufmgr_bo_unreserve_locked(struct dma_buf *bo)
+{
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_bo_unreserve_locked);
+
+void dmabufmgr_bo_unreserve(struct dma_buf *bo)
+{
+ spin_lock(&dmabufmgr.lru_lock);
+ dmabufmgr_bo_unreserve_locked(bo);
+ spin_unlock(&dmabufmgr.lru_lock);
+}
+EXPORT_SYMBOL_GPL(dmabufmgr_bo_unreserve);
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 24e88fe..01c4f71 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -40,6 +40,9 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dmabuf = file->private_data;
dmabuf->ops->release(dmabuf);
+ BUG_ON(waitqueue_active(&dmabuf->event_queue));
+ if (dmabuf->sync_buf)
+ dma_buf_put(dmabuf->sync_buf);
kfree(dmabuf);
return 0;
}
@@ -119,6 +122,7 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
+ init_waitqueue_head(&dmabuf->event_queue);
return dmabuf;
}
diff --git a/include/linux/dma-buf-mgr.h b/include/linux/dma-buf-mgr.h
new file mode 100644
index 0000000..b26462e
--- /dev/null
+++ b/include/linux/dma-buf-mgr.h
@@ -0,0 +1,84 @@
+/*
+ * Header file for dma buffer sharing framework.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal(a)ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and specially
+ * Arnd Bergmann <arnd(a)arndb.de>, Rob Clark <rob(a)ti.com> and
+ * Daniel Vetter <daniel(a)ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_BUF_MGR_H__
+#define __DMA_BUF_MGR_H__
+
+#include <linux/dma-buf.h>
+#include <linux/list.h>
+
+/** Size of each hwcontext in synchronization dma-buf */
+#define DMABUFMGR_HWCONTEXT_SYNC_ALIGN 16
+
+struct dmabufmgr {
+ spinlock_t lru_lock;
+
+ u32 counter;
+};
+extern struct dmabufmgr dmabufmgr;
+
+extern int
+dmabufmgr_bo_reserve_locked(struct dma_buf *bo,
+ bool interruptible, bool no_wait,
+ bool use_sequence, u32 sequence);
+
+extern int
+dmabufmgr_bo_reserve(struct dma_buf *bo,
+ bool interruptible, bool no_wait,
+ bool use_sequence, u32 sequence);
+
+extern void
+dmabufmgr_bo_unreserve_locked(struct dma_buf *bo);
+
+extern void
+dmabufmgr_bo_unreserve(struct dma_buf *bo);
+
+extern int
+dmabufmgr_bo_wait_unreserved(struct dma_buf *bo, bool interruptible);
+
+/* execbuf util support for reservations
+ * matches ttm_execbuf_util
+ */
+struct dmabufmgr_validate {
+ struct list_head head;
+ struct dma_buf *bo;
+ bool reserved;
+
+ /* If non-null, check for attachments */
+ struct dma_buf *sync_buf;
+ u32 sync_ofs, sync_val;
+};
+
+/** reserve a linked list of struct dmabufmgr_validate entries */
+extern int
+dmabufmgr_eu_reserve_buffers(struct list_head *list);
+
+/** Undo reservation */
+extern void
+dmabufmgr_eu_backoff_reservation(struct list_head *list);
+
+/** Commit reservation */
+extern void
+dmabufmgr_eu_fence_buffer_objects(struct dma_buf *sync_buf, u32 ofs, u32 val, struct list_head *list);
+
+#endif /* __DMA_BUF_MGR_H__ */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index eb48f38..b2ab395 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -113,6 +113,8 @@ struct dma_buf_ops {
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
* @priv: exporter specific private data for this buffer object.
+ * @bufmgr_entry: used by dmabufmgr
+ * @bufdev: used by dmabufmgr
*/
struct dma_buf {
size_t size;
@@ -122,6 +124,24 @@ struct dma_buf {
/* mutex to serialize list manipulation and attach/detach */
struct mutex lock;
void *priv;
+
+ /** dmabufmgr members */
+ wait_queue_head_t event_queue;
+
+ /**
+ * dmabufmgr members protected by the dmabufmgr::lru_lock.
+ */
+ u32 val_seq;
+ bool seq_valid;
+
+ struct dma_buf *sync_buf;
+ u32 sync_ofs, sync_val;
+
+ /**
+ * dmabufmgr members protected by the dmabufmgr::lru_lock
+ * only when written to.
+ */
+ atomic_t reserved;
};
/**
Hello!
This is a quick update on the patchset that replaces the custom consistent dma
region usage in the dma-mapping framework in favour of generic vmalloc
areas created on demand for each allocation. The main purpose of this
patchset is to remove the 2MiB limit on dma coherent/writecombine
allocations.
In this version the arch-independent VM_DMA flag has been replaced with an
ARM-specific VM_ARM_DMA_CONSISTENT flag.
This patchset is based on the vanilla v3.5-rc4 release.
Best regards
Marek Szyprowski
Samsung Poland R&D Center
Changelog:
v4:
- replaced arch-independent VM_DMA flag with ARM-specific
VM_ARM_DMA_CONSISTENT flag
v3: http://thread.gmane.org/gmane.linux.kernel.mm/80028/
- rebased onto v3.4-rc2: added support for IOMMU-aware implementation
of dma-mapping calls, unified with CMA coherent dma pool
- implemented changes requested by Minchan Kim: added more checks for
vmarea->flags & VM_DMA, renamed some variables, removed obsolete locks,
squashed find_vm_area() exporting patch into the main redesign patch
v2: http://thread.gmane.org/gmane.linux.kernel.mm/78563
- added support for atomic allocations (served from preallocated pool)
- minor cleanup here and there
- rebased onto v3.4-rc7
v1: http://thread.gmane.org/gmane.linux.kernel.mm/76703
- initial version
Patch summary:
Marek Szyprowski (2):
mm: vmalloc: use const void * for caller argument
ARM: dma-mapping: remove custom consistent dma region
Documentation/kernel-parameters.txt | 2 +-
arch/arm/include/asm/dma-mapping.h | 2 +-
arch/arm/mm/dma-mapping.c | 505 +++++++++++++----------------------
arch/arm/mm/mm.h | 3 +
include/linux/vmalloc.h | 9 +-
mm/vmalloc.c | 28 ++-
6 files changed, 207 insertions(+), 342 deletions(-)
--
1.7.1.569.g6f426
From: Subash Patel <subash.rp(a)samsung.com>
This patch series is a split and re-send of my original patch, after a request
from Inki Dae.
The two patches below add the required error checks in drm dmabuf for when the
system fails to allocate pages for the scatter-gather table. This is a very rare
situation, and occurs when the system is under memory pressure.
Scatter-gather asks for memory using sg_kmalloc(), and this can return no
memory. Without the return value checks below, the code will crash the system
due to an invalid pointer access.
Subash Patel (2):
DRM: Exynos: return NULL if exynos_pages_to_sg fails
DRM: Exynos: check for null in return value of
dma_buf_map_attachment()
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
--
1.7.9.5
Hey,
Due to inertia, I thought I would take a shot at implicit synchronization as well.
I have just barely enough to make nouveau synchronize with itself using the cpu
for now. Hopefully the general idea is correct, but I feel the
implementation is wrong.
There are 2 ways to get deadlocks if no proper care is taken to avoid them:
the first is 2 tasks taking each device's lock in a different order,
the second is 2 devices each waiting on the other's completion before starting
their own work. The easiest way to avoid both is to introduce a global
dma_buf_submit_mutex for the cases where synchronization is needed.
That way only 1 submission involving dma-buffer synchronisation can be made
at a time, which makes it impossible to deadlock: even if one task takes the
dmabuf mutex, then dev a's mutex, then dev b's mutex, and another task swaps
a and b, the dmabuf mutex prevents them from doing so at the same time.
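A minimal sketch of that locking order; struct device_ctx, its lock and
submit_with_sync() are made up for illustration, only dma_buf_submit_mutex
comes from the idea above:

static DEFINE_MUTEX(dma_buf_submit_mutex);

/* Serialize all submissions that involve dma-buf synchronization, so the
 * per-device locks below can be taken in any order without ABBA deadlocks. */
static int submit_with_sync(struct device_ctx *a, struct device_ctx *b)
{
	int ret;

	mutex_lock(&dma_buf_submit_mutex);
	mutex_lock(&a->lock);	/* the a/b order no longer matters... */
	mutex_lock(&b->lock);	/* ...only one submitter can get here */

	ret = 0;		/* queue work, emit fence ops, etc. */

	mutex_unlock(&b->lock);
	mutex_unlock(&a->lock);
	mutex_unlock(&dma_buf_submit_mutex);
	return ret;
}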
That leaves the real problem of the synchronization itself. Since the code
involved is already sharing dma-buf's, I felt the easiest way to implement it
would be... another dma-buf. Some hardware might have specific requirements for
these, so I haven't pinned down the exact details yet.
There's a bit of intermingling between the drm and dma-buf namespaces since this
is an early wip; any comments are welcome though.
This is what I used so far:
#define DRM_PRIME_FENCE_MAX 2
struct drm_prime_fence {
struct dma_buf *sync_buf;
uint64_t offset;
uint32_t value;
enum {
/* Nop allows preparations to be made in case the dma_buf
 * is different at release time, so that the release call
 * can never fail at that point.
 */
DRM_PRIME_FENCE_NOP = 0,
DRM_PRIME_FENCE_WAIT_EQ,
// DRM_PRIME_FENCE_WAIT_GE, /* block while ((int)(cur - expected) < 0); */
DRM_PRIME_FENCE_SET
} op;
};
and added to struct dma_buf_ops:
/* The fence array is written by the function to indicate what is needed
 * to acquire this buffer; up to DRM_PRIME_FENCE_MAX entries are allowed.
 * sync_acquire returns a negative value on error, otherwise the
 * number of fence ops that need to be executed.
 *
 * Release is not allowed to fail and merely returns the number of
 * fence ops that need to be executed after the command stream is done.
 * Abort occurs when there's a failure between acquire and release,
 * for example because dma-bufs from multiple devices are involved
 * and the other one failed to acquire.
 */
int (*sync_acquire)(struct dma_buf *, struct drm_prime_fence fence[2],
unsigned long align, unsigned long release_write);
int (*sync_release)(struct dma_buf *, struct drm_prime_fence fence[2]);
void (*sync_abort)(struct dma_buf *);
I'm not completely sure about this part yet. align can be seen as a minimum
alignment requirement; ideally I would negotiate it earlier, but I haven't
found the correct place yet -- maybe on attach?
nouveau writes a 16-byte stamp as part of its semaphore ops
(4 bytes programmable, 4 bytes padding, 8 bytes timestamp), which is why I need
to communicate those requirements somehow. Not all nouveau cards would support
DRM_PRIME_FENCE_WAIT_GE either.
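Purely as illustration, attach-time negotiation could look something like
this; neither the struct nor the hook exists yet, they are only here to make
the idea concrete:

/* Hypothetical: the exporter advertises its sync stamp requirements when
 * a device attaches, so importers can lay out their sync dma-buf. */
struct dma_buf_sync_reqs {
	unsigned long min_align;	/* e.g. 16 for nouveau's stamp */
	unsigned int supported_ops;	/* bitmask of DRM_PRIME_FENCE_* ops */
};

/* Could become a dma_buf_ops hook, called during attach: */
int (*sync_get_reqs)(struct dma_buf *, struct dma_buf_sync_reqs *);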
I think there is great power in making the sync object itself just another
dma-buf that can be written to and/or read, especially since all graphics
cards have some way to write an arbitrary 4-byte value to an arbitrary location
(even the oldest intel cards have a blitter! :D). I'm hoping for more input
into making the api better for other users too, which is why I'm posting
this as early as I have something working (for some definition of working).
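As a rough sketch of the cpu fallback, this is how the fence ops above could
be executed against a sync buffer that is already mapped; the mapping and cpu
access bracketing (dma_buf_begin_cpu_access() and friends) are elided, and a
real version would want a timeout on the wait:

/* Sketch: execute up to DRM_PRIME_FENCE_MAX fence ops on the cpu,
 * given a kernel mapping vaddr of the sync dma-buf contents. */
static void cpu_execute_fences(void *vaddr,
			       struct drm_prime_fence *fences, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		u32 *slot = (u32 *)((char *)vaddr + fences[i].offset);

		switch (fences[i].op) {
		case DRM_PRIME_FENCE_NOP:
			break;
		case DRM_PRIME_FENCE_WAIT_EQ:
			/* busy-wait until the gpu writes the value */
			while (ACCESS_ONCE(*slot) != fences[i].value)
				cpu_relax();
			break;
		case DRM_PRIME_FENCE_SET:
			ACCESS_ONCE(*slot) = fences[i].value;
			break;
		}
	}
}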
Thoughts?
~Maarten
The goal of these patches is to allow ION clients (drivers or userland applications)
to use the Contiguous Memory Allocator (CMA).
To get more info about CMA:
http://lists.linaro.org/pipermail/linaro-mm-sig/2012-February/001328.html
patches version 6:
- add private field in ion_platform_heap to pass the device
linked with CMA.
- rework CMA heap to use private field.
- prepare CMA heap for incoming dma_common_get_sgtable function
http://lists.linaro.org/pipermail/linaro-mm-sig/2012-June/002109.html
- simplify ion-ux500 driver.
patches version 5:
- port patches on android kernel 3.4 where ION use dmabuf
- add ion_cma_heap_map_dma and ion_cma_heap_unmap_dma functions
patches version 4:
- add ION_HEAP_TYPE_DMA heap type in ion_heap_type enum.
- CMA heap is now a "native" ION heap.
- add ion_heap_create_full function to keep backward compatibility.
- clean up included files in CMA heap
- ux500-ion is using ion_heap_create_full instead of ion_heap_create
patches version 3:
- add a private field in the ion_heap structure instead of exposing the
ion_device structure to all heaps
- ion_cma_heap is no longer a platform driver
- ion_cma_heap uses the ion_heap private field to store the device pointer and
make the link with reserved CMA regions
- provide the ux500-ion driver and a configuration file for the snowball board
to give an example of how to use CMA heaps
patches version 2:
- address review comments from Andy Green
Benjamin Gaignard (4):
fix ion_platform_data definition
add private field in ion_heap and ion_platform_heap structure
add CMA heap
add test/example driver for ux500 platform
arch/arm/mach-ux500/board-mop500.c | 64 +++++++++++++
drivers/gpu/ion/Kconfig | 5 +
drivers/gpu/ion/Makefile | 5 +-
drivers/gpu/ion/ion_cma_heap.c | 179 ++++++++++++++++++++++++++++++++++++
drivers/gpu/ion/ion_heap.c | 11 +++
drivers/gpu/ion/ion_priv.h | 8 ++
drivers/gpu/ion/ux500/Makefile | 1 +
drivers/gpu/ion/ux500/ux500_ion.c | 134 +++++++++++++++++++++++++++
include/linux/ion.h | 7 +-
9 files changed, 412 insertions(+), 2 deletions(-)
create mode 100644 drivers/gpu/ion/ion_cma_heap.c
create mode 100644 drivers/gpu/ion/ux500/Makefile
create mode 100644 drivers/gpu/ion/ux500/ux500_ion.c
--
1.7.10
From: Subash Patel <subash.rp(a)samsung.com>
exynos_pages_to_sg() internally calls sg_kmalloc(), which can fail to return
pages when the system is under a severe memory crunch. One such instance
is chromeos-install in chromeos. This patch adds a check for the return
value of the function in question, returning NULL on failure.
Change-Id: I541ed30491a926ebe72738225041c9f2d88007bc
Signed-off-by: Subash Patel <subash.ramaswamy(a)linaro.org>
CC: dri-devel(a)lists.freedesktop.org
CC: linux-samsung-soc(a)vger.kernel.org
CC: linaro-mm-sig(a)lists.linaro.org
CC: inki.dae(a)samsung.com
CC: airlied(a)redhat.com
CC: olofj(a)chromium.org
---
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 97325c1..52cf761 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -87,6 +87,10 @@ static struct sg_table *
npages = buf->size / buf->page_size;
sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
+ if (!sgt) {
+ DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+ goto err_unlock;
+ }
nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
--
1.7.9.5
Hi Linus,
I would like to ask you to pull another minor fixup for the ARM dma-mapping
redesign and extensions merged in v3.5-rc1.
The following changes since commit 6b16351acbd415e66ba16bf7d473ece1574cf0bc:
Linux 3.5-rc4 (2012-06-24 12:53:04 -0700)
with the top-most commit 593f47355467b9ef44293698817e2bdb347e2d11
ARM: dma-mapping: fix buffer chunk allocation order
are available in the git repository at:
git://git.linaro.org/people/mszyprowski/linux-dma-mapping.git fixes-for-linus
Marek Szyprowski (1):
ARM: dma-mapping: fix buffer chunk allocation order
arch/arm/mm/dma-mapping.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
Thanks!
Best regards
Marek Szyprowski
Samsung Poland R&D Center