From: Rob Clark <robdclark@chromium.org>
This series converts the driver to the DRM GPU VA Manager[1], and adds support for Vulkan Sparse Memory[2] in the form of:
1. A new VM_BIND submitqueue type for executing VM MSM_VM_BIND_OP_MAP/MAP_NULL/UNMAP commands
2. A new VM_BIND ioctl to allow submitting batches of one or more MAP/MAP_NULL/UNMAP commands to a VM_BIND submitqueue
I did not implement support for synchronous VM_BIND commands. Since userspace could just immediately wait for the `SUBMIT` to complete, I don't think we need this extra complexity in the kernel. Synchronous/immediate VM_BIND operations could be implemented with a 2nd VM_BIND submitqueue.
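For illustration, a userspace driver that wants synchronous semantics could keep a dedicated VM_BIND queue and simply wait on the out-fence of each submission before returning. Rough sketch below; the drm_msm_vm_bind struct, the MSM_VM_BIND_FENCE_FD_OUT flag, and the queue_id/fence_fd fields are placeholders standing in for the VM_BIND uapi added at the end of the series:

/*
 * Sketch only: "synchronous" bind implemented in userspace by waiting on
 * the out-fence of an async VM_BIND submission.  The struct/flag/field
 * names below (drm_msm_vm_bind, MSM_VM_BIND_FENCE_FD_OUT, queue_id,
 * fence_fd) are assumptions standing in for the real uapi.
 */
#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int vm_bind_sync(int fd, uint32_t sync_queue_id,
			struct drm_msm_vm_bind *req)
{
	int ret;

	req->queue_id = sync_queue_id;		/* the 2nd, "sync" VM_BIND queue */
	req->flags |= MSM_VM_BIND_FENCE_FD_OUT;	/* request an out-fence fd */

	ret = ioctl(fd, DRM_IOCTL_MSM_VM_BIND, req);
	if (ret)
		return ret;

	/* Immediately wait for the bind to complete: */
	struct pollfd pfd = { .fd = req->fence_fd, .events = POLLIN };
	while (poll(&pfd, 1, -1) < 0 && errno == EINTR)
		;
	close(req->fence_fd);

	return 0;
}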
The corresponding mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32533
Changes in v3:
 - Switched to a separate VM_BIND ioctl. This makes the UABI a bit cleaner, but OTOH the userspace code was cleaner when the end result of either type of VkQueue led to the same ioctl. So I'm a bit on the fence.
 - Switched to doing the gpuvm bookkeeping synchronously, and only deferring the pgtable updates. This avoids needing to hold any resv locks in the fence signaling path, resolving the last shrinker-related lockdep complaints. OTOH it means userspace can trigger invalid pgtable updates with multiple VM_BIND queues. In this case, we ensure that unmaps happen completely (to prevent userspace from using this to access free'd pages), mark the context as unusable, and move on with life.
 - Link to v2: https://lore.kernel.org/all/20250319145425.51935-1-robdclark@gmail.com/
Changes in v2:
 - Dropped Bibek Kumar Patro's arm-smmu patches[3], which have since been merged.
 - Pre-allocate all the things, and drop the HACK patch which disabled the shrinker. This includes ensuring that vm_bo objects are allocated up front, pre-allocating VMA objects, and pre-allocating pages used for pgtable updates. The latter utilizes io_pgtable_cfg callbacks for pgtable alloc/free, that were initially added for panthor.
 - Add back support for BO dumping for devcoredump.
 - Link to v1 (RFC): https://lore.kernel.org/dri-devel/20241207161651.410556-1-robdclark@gmail.co...
[1] https://www.kernel.org/doc/html/next/gpu/drm-mm.html#drm-gpuvm
[2] https://docs.vulkan.org/spec/latest/chapters/sparsemem.html
[3] https://patchwork.kernel.org/project/linux-arm-kernel/list/?series=909700
Rob Clark (33):
  drm/gpuvm: Don't require obj lock in destructor path
  drm/gpuvm: Allow VAs to hold soft reference to BOs
  iommu/io-pgtable-arm: Add quirk to quiet WARN_ON()
  drm/msm: Rename msm_file_private -> msm_context
  drm/msm: Improve msm_context comments
  drm/msm: Rename msm_gem_address_space -> msm_gem_vm
  drm/msm: Remove vram carveout support
  drm/msm: Collapse vma allocation and initialization
  drm/msm: Collapse vma close and delete
  drm/msm: Don't close VMAs on purge
  drm/msm: drm_gpuvm conversion
  drm/msm: Convert vm locking
  drm/msm: Use drm_gpuvm types more
  drm/msm: Split out helper to get iommu prot flags
  drm/msm: Add mmu support for non-zero offset
  drm/msm: Add PRR support
  drm/msm: Rename msm_gem_vma_purge() -> _unmap()
  drm/msm: Lazily create context VM
  drm/msm: Add opt-in for VM_BIND
  drm/msm: Mark VM as unusable on GPU hangs
  drm/msm: Add _NO_SHARE flag
  drm/msm: Crashdump prep for sparse mappings
  drm/msm: rd dumping prep for sparse mappings
  drm/msm: Crashdec support for sparse
  drm/msm: rd dumping support for sparse
  drm/msm: Extract out syncobj helpers
  drm/msm: Use DMA_RESV_USAGE_BOOKKEEP/KERNEL
  drm/msm: Add VM_BIND submitqueue
  drm/msm: Support IO_PGTABLE_QUIRK_NO_WARN_ON
  drm/msm: Support pgtable preallocation
  drm/msm: Split out map/unmap ops
  drm/msm: Add VM_BIND ioctl
  drm/msm: Bump UAPI version
 drivers/gpu/drm/drm_gpuvm.c | 15 +-
 drivers/gpu/drm/msm/Kconfig | 1 +
 drivers/gpu/drm/msm/Makefile | 1 +
 drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 25 +-
 drivers/gpu/drm/msm/adreno/a2xx_gpummu.c | 5 +-
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 17 +-
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 17 +-
 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 4 +-
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 22 +-
 drivers/gpu/drm/msm/adreno/a5xx_power.c | 2 +-
 drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 10 +-
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 32 +-
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 2 +-
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 49 +-
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 6 +-
 drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 10 +-
 drivers/gpu/drm/msm/adreno/adreno_device.c | 4 -
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 88 +-
 drivers/gpu/drm/msm/adreno/adreno_gpu.h | 23 +-
 .../drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 14 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 18 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 2 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 18 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 14 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 4 +-
 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 6 +-
 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 28 +-
 drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 12 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 4 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 19 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 12 +-
 drivers/gpu/drm/msm/dsi/dsi_host.c | 14 +-
 drivers/gpu/drm/msm/msm_drv.c | 183 +--
 drivers/gpu/drm/msm/msm_drv.h | 35 +-
 drivers/gpu/drm/msm/msm_fb.c | 18 +-
 drivers/gpu/drm/msm/msm_fbdev.c | 2 +-
 drivers/gpu/drm/msm/msm_gem.c | 489 +++---
 drivers/gpu/drm/msm/msm_gem.h | 217 ++-
 drivers/gpu/drm/msm/msm_gem_prime.c | 15 +
 drivers/gpu/drm/msm/msm_gem_shrinker.c | 4 +-
 drivers/gpu/drm/msm/msm_gem_submit.c | 295 ++--
 drivers/gpu/drm/msm/msm_gem_vma.c | 1265 +++++++++++++++--
 drivers/gpu/drm/msm/msm_gpu.c | 171 ++-
 drivers/gpu/drm/msm/msm_gpu.h | 132 +-
 drivers/gpu/drm/msm/msm_iommu.c | 298 +++-
 drivers/gpu/drm/msm/msm_kms.c | 18 +-
 drivers/gpu/drm/msm/msm_kms.h | 2 +-
 drivers/gpu/drm/msm/msm_mmu.h | 38 +-
 drivers/gpu/drm/msm/msm_rd.c | 62 +-
 drivers/gpu/drm/msm/msm_ringbuffer.c | 4 +-
 drivers/gpu/drm/msm/msm_submitqueue.c | 86 +-
 drivers/gpu/drm/msm/msm_syncobj.c | 172 +++
 drivers/gpu/drm/msm/msm_syncobj.h | 37 +
 drivers/iommu/io-pgtable-arm.c | 18 +-
 include/drm/drm_gpuvm.h | 12 +-
 include/linux/io-pgtable.h | 8 +
 include/uapi/drm/msm_drm.h | 149 +-
 57 files changed, 3012 insertions(+), 1216 deletions(-)
 create mode 100644 drivers/gpu/drm/msm/msm_syncobj.c
 create mode 100644 drivers/gpu/drm/msm/msm_syncobj.h
From: Rob Clark <robdclark@chromium.org>
Buffers that are not shared between contexts can share a single resv object. This way drm_gpuvm will not track them as external objects, and submit-time validation overhead will be O(1) for all N non-shared BOs, instead of O(N).
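From userspace this is opt-in per BO at allocation time. A minimal sketch (error handling omitted; MSM_BO_WC is just picked as an example cache mode):

/* Allocate a context-private BO that shares the VM's resv.  Such a BO
 * cannot be exported or used for scanout, but does not need to be
 * listed in the SUBMIT ioctl (unless referenced by a submit_cmd).
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static uint32_t alloc_private_bo(int fd, uint64_t size)
{
	struct drm_msm_gem_new req = {
		.size  = size,
		.flags = MSM_BO_WC | MSM_BO_NO_SHARE,
	};

	if (ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req))
		return 0;	/* 0 is never a valid GEM handle */

	return req.handle;
}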
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/msm_drv.h       |  1 +
 drivers/gpu/drm/msm/msm_gem.c       | 23 +++++++++++++++++++++++
 drivers/gpu/drm/msm/msm_gem_prime.c | 15 +++++++++++++++
 include/uapi/drm/msm_drm.h          | 14 ++++++++++++++
 4 files changed, 53 insertions(+)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index b77fd2c531c3..b0add236cbb3 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -246,6 +246,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags); int msm_gem_prime_pin(struct drm_gem_object *obj); void msm_gem_prime_unpin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 3708d4579203..d0f44c981351 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -532,6 +532,9 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
msm_gem_assert_locked(obj);
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE) + return -EINVAL; + vma = get_vma_locked(obj, vm, range_start, range_end); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -1060,6 +1063,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj) put_pages(obj); }
+ if (msm_obj->flags & MSM_BO_NO_SHARE) { + struct drm_gem_object *r_obj = + container_of(obj->resv, struct drm_gem_object, _resv); + + BUG_ON(obj->resv == &obj->_resv); + + /* Drop reference we hold to shared resv obj: */ + drm_gem_object_put(r_obj); + } + drm_gem_object_release(obj);
kfree(msm_obj->metadata); @@ -1092,6 +1105,15 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, if (name) msm_gem_object_set_name(obj, "%s", name);
+ if (flags & MSM_BO_NO_SHARE) { + struct msm_context *ctx = file->driver_priv; + struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm); + + drm_gem_object_get(r_obj); + + obj->resv = r_obj->resv; + } + ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */ @@ -1124,6 +1146,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = { .free = msm_gem_free_object, .open = msm_gem_open, .close = msm_gem_close, + .export = msm_gem_prime_export, .pin = msm_gem_prime_pin, .unpin = msm_gem_prime_unpin, .get_sg_table = msm_gem_prime_get_sg_table, diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index ee267490c935..1a6d8099196a 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -16,6 +16,9 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); int npages = obj->size >> PAGE_SHIFT;
+ if (msm_obj->flags & MSM_BO_NO_SHARE) + return ERR_PTR(-EINVAL); + if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */ return ERR_PTR(-ENOMEM);
@@ -45,6 +48,15 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, return msm_gem_import(dev, attach->dmabuf, sg); }
+ +struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags) +{ + if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE) + return ERR_PTR(-EPERM); + + return drm_gem_prime_export(obj, flags); +} + int msm_gem_prime_pin(struct drm_gem_object *obj) { struct page **pages; @@ -53,6 +65,9 @@ int msm_gem_prime_pin(struct drm_gem_object *obj) if (obj->import_attach) return 0;
+ if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE) + return -EINVAL; + pages = msm_gem_pin_pages_locked(obj); if (IS_ERR(pages)) ret = PTR_ERR(pages); diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index b974f5a24dbc..1bccc347945c 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -140,6 +140,19 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */ #define MSM_BO_GPU_READONLY 0x00000002 +/* Private buffers do not need to be explicitly listed in the SUBMIT + * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private + * buffers may NOT be imported/exported or used for scanout (or any + * other situation where buffers can be indefinitely pinned, but + * cases other than scanout are all kernel owned BOs which are not + * visible to userspace). + * + * In exchange for those constraints, all private BOs associated with + * a single context (drm_file) share a single dma_resv, and if there + * has been no eviction since the last submit, there is no per-BO + * bookkeeping to do, significantly cutting the SUBMIT overhead. + */ +#define MSM_BO_NO_SHARE 0x00000004 #define MSM_BO_CACHE_MASK 0x000f0000 /* cache modes */ #define MSM_BO_CACHED 0x00010000 @@ -149,6 +162,7 @@ struct drm_msm_param {
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \ MSM_BO_GPU_READONLY | \ + MSM_BO_NO_SHARE | \ MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
From: Rob Clark <robdclark@chromium.org>
We'll be re-using these for the VM_BIND ioctl.
Also, rename a few things in the uapi header to reflect that syncobj use is not specific to the submit ioctl.
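For reference, this is roughly how the renamed uapi is used from the submit path today; the VM_BIND ioctl will take the same drm_msm_syncobj arrays. A minimal userspace sketch:

/* Wait on one syncobj before the submit, and signal a timeline point on
 * another when it completes, using the renamed struct drm_msm_syncobj.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int submit_with_syncobjs(int fd, struct drm_msm_gem_submit *submit,
				uint32_t wait_handle,
				uint32_t signal_handle, uint64_t point)
{
	struct drm_msm_syncobj in = {
		.handle = wait_handle,
		.flags  = MSM_SYNCOBJ_RESET,	/* reset the syncobj after the wait */
	};
	struct drm_msm_syncobj out = {
		.handle = signal_handle,
		.point  = point,		/* timeline point to signal */
	};

	submit->flags |= MSM_SUBMIT_SYNCOBJ_IN | MSM_SUBMIT_SYNCOBJ_OUT;
	submit->in_syncobjs     = (uintptr_t)&in;
	submit->nr_in_syncobjs  = 1;
	submit->out_syncobjs    = (uintptr_t)&out;
	submit->nr_out_syncobjs = 1;
	submit->syncobj_stride  = sizeof(struct drm_msm_syncobj);

	return ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, submit);
}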
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/Makefile         |   1 +
 drivers/gpu/drm/msm/msm_gem_submit.c | 192 ++-------------------------
 drivers/gpu/drm/msm/msm_syncobj.c    | 172 ++++++++++++++++++++++++
 drivers/gpu/drm/msm/msm_syncobj.h    |  37 ++++++
 include/uapi/drm/msm_drm.h           |  26 ++--
 5 files changed, 235 insertions(+), 193 deletions(-)
 create mode 100644 drivers/gpu/drm/msm/msm_syncobj.c
 create mode 100644 drivers/gpu/drm/msm/msm_syncobj.h
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 5df20cbeafb8..8af34f87e0c8 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -128,6 +128,7 @@ msm-y += \ msm_rd.o \ msm_ringbuffer.o \ msm_submitqueue.o \ + msm_syncobj.o \ msm_gpu_tracepoints.o \
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 375d89f23cd1..552ff2d57294 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -16,6 +16,7 @@ #include "msm_gpu.h" #include "msm_gem.h" #include "msm_gpu_trace.h" +#include "msm_syncobj.h"
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable * error msgs for debugging, but we don't spam dmesg by default @@ -477,173 +478,6 @@ void msm_submit_retire(struct msm_gem_submit *submit) } }
-struct msm_submit_post_dep { - struct drm_syncobj *syncobj; - uint64_t point; - struct dma_fence_chain *chain; -}; - -static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit, - struct drm_file *file, - uint64_t in_syncobjs_addr, - uint32_t nr_in_syncobjs, - size_t syncobj_stride) -{ - struct drm_syncobj **syncobjs = NULL; - struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; - int ret = 0; - uint32_t i, j; - - syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs), - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - if (!syncobjs) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < nr_in_syncobjs; ++i) { - uint64_t address = in_syncobjs_addr + i * syncobj_stride; - - if (copy_from_user(&syncobj_desc, - u64_to_user_ptr(address), - min(syncobj_stride, sizeof(syncobj_desc)))) { - ret = -EFAULT; - break; - } - - if (syncobj_desc.point && - !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) { - ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported"); - break; - } - - if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) { - ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags); - break; - } - - ret = drm_sched_job_add_syncobj_dependency(&submit->base, file, - syncobj_desc.handle, syncobj_desc.point); - if (ret) - break; - - if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) { - syncobjs[i] = - drm_syncobj_find(file, syncobj_desc.handle); - if (!syncobjs[i]) { - ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i); - break; - } - } - } - - if (ret) { - for (j = 0; j <= i; ++j) { - if (syncobjs[j]) - drm_syncobj_put(syncobjs[j]); - } - kfree(syncobjs); - return ERR_PTR(ret); - } - return syncobjs; -} - -static void msm_reset_syncobjs(struct drm_syncobj **syncobjs, - uint32_t nr_syncobjs) -{ - uint32_t i; - - for (i = 0; syncobjs && i < nr_syncobjs; ++i) { - if (syncobjs[i]) - drm_syncobj_replace_fence(syncobjs[i], NULL); - } -} - -static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, - struct drm_file *file, - uint64_t syncobjs_addr, - uint32_t nr_syncobjs, - size_t syncobj_stride) -{ - struct msm_submit_post_dep *post_deps; - struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; - int ret = 0; - uint32_t i, j; - - post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps), - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - if (!post_deps) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < nr_syncobjs; ++i) { - uint64_t address = syncobjs_addr + i * syncobj_stride; - - if (copy_from_user(&syncobj_desc, - u64_to_user_ptr(address), - min(syncobj_stride, sizeof(syncobj_desc)))) { - ret = -EFAULT; - break; - } - - post_deps[i].point = syncobj_desc.point; - - if (syncobj_desc.flags) { - ret = UERR(EINVAL, dev, "invalid syncobj flags"); - break; - } - - if (syncobj_desc.point) { - if (!drm_core_check_feature(dev, - DRIVER_SYNCOBJ_TIMELINE)) { - ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported"); - break; - } - - post_deps[i].chain = dma_fence_chain_alloc(); - if (!post_deps[i].chain) { - ret = -ENOMEM; - break; - } - } - - post_deps[i].syncobj = - drm_syncobj_find(file, syncobj_desc.handle); - if (!post_deps[i].syncobj) { - ret = UERR(EINVAL, dev, "invalid syncobj handle"); - break; - } - } - - if (ret) { - for (j = 0; j <= i; ++j) { - dma_fence_chain_free(post_deps[j].chain); - if (post_deps[j].syncobj) - drm_syncobj_put(post_deps[j].syncobj); - } - - kfree(post_deps); - return ERR_PTR(ret); - } - - return post_deps; -} - -static void msm_process_post_deps(struct msm_submit_post_dep *post_deps, - 
uint32_t count, struct dma_fence *fence) -{ - uint32_t i; - - for (i = 0; post_deps && i < count; ++i) { - if (post_deps[i].chain) { - drm_syncobj_add_point(post_deps[i].syncobj, - post_deps[i].chain, - fence, post_deps[i].point); - post_deps[i].chain = NULL; - } else { - drm_syncobj_replace_fence(post_deps[i].syncobj, - fence); - } - } -} - int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file) { @@ -654,7 +488,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_gpu *gpu = priv->gpu; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; - struct msm_submit_post_dep *post_deps = NULL; + struct msm_syncobj_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; int out_fence_fd = -1; unsigned i; @@ -730,10 +564,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, }
if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) { - syncobjs_to_reset = msm_parse_deps(submit, file, - args->in_syncobjs, - args->nr_in_syncobjs, - args->syncobj_stride); + syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base, + file, args->in_syncobjs, + args->nr_in_syncobjs, + args->syncobj_stride); if (IS_ERR(syncobjs_to_reset)) { ret = PTR_ERR(syncobjs_to_reset); goto out_unlock; @@ -741,10 +575,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, }
if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) { - post_deps = msm_parse_post_deps(dev, file, - args->out_syncobjs, - args->nr_out_syncobjs, - args->syncobj_stride); + post_deps = msm_syncobj_parse_post_deps(dev, file, + args->out_syncobjs, + args->nr_out_syncobjs, + args->syncobj_stride); if (IS_ERR(post_deps)) { ret = PTR_ERR(post_deps); goto out_unlock; @@ -887,10 +721,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->fence = submit->fence_id; queue->last_fence = submit->fence_id;
- msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); - msm_process_post_deps(post_deps, args->nr_out_syncobjs, - submit->user_fence); - + msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs); + msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence);
out: submit_cleanup(submit, !!ret); diff --git a/drivers/gpu/drm/msm/msm_syncobj.c b/drivers/gpu/drm/msm/msm_syncobj.c new file mode 100644 index 000000000000..4baa9f522c54 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_syncobj.c @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2020 Google, Inc */ + +#include "drm/drm_drv.h" + +#include "msm_drv.h" +#include "msm_syncobj.h" + +struct drm_syncobj ** +msm_syncobj_parse_deps(struct drm_device *dev, + struct drm_sched_job *job, + struct drm_file *file, + uint64_t in_syncobjs_addr, + uint32_t nr_in_syncobjs, + size_t syncobj_stride) +{ + struct drm_syncobj **syncobjs = NULL; + struct drm_msm_syncobj syncobj_desc = {0}; + int ret = 0; + uint32_t i, j; + + syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (!syncobjs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nr_in_syncobjs; ++i) { + uint64_t address = in_syncobjs_addr + i * syncobj_stride; + + if (copy_from_user(&syncobj_desc, + u64_to_user_ptr(address), + min(syncobj_stride, sizeof(syncobj_desc)))) { + ret = -EFAULT; + break; + } + + if (syncobj_desc.point && + !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) { + ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported"); + break; + } + + if (syncobj_desc.flags & ~MSM_SYNCOBJ_FLAGS) { + ret = UERR(EINVAL, dev, "invalid syncobj flags: %x", syncobj_desc.flags); + break; + } + + ret = drm_sched_job_add_syncobj_dependency(job, file, + syncobj_desc.handle, + syncobj_desc.point); + if (ret) + break; + + if (syncobj_desc.flags & MSM_SYNCOBJ_RESET) { + syncobjs[i] = drm_syncobj_find(file, syncobj_desc.handle); + if (!syncobjs[i]) { + ret = UERR(EINVAL, dev, "invalid syncobj handle: %u", i); + break; + } + } + } + + if (ret) { + for (j = 0; j <= i; ++j) { + if (syncobjs[j]) + drm_syncobj_put(syncobjs[j]); + } + kfree(syncobjs); + return ERR_PTR(ret); + } + return syncobjs; +} + +void +msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs) +{ + uint32_t i; + + for (i = 0; syncobjs && i < nr_syncobjs; ++i) { + if (syncobjs[i]) + drm_syncobj_replace_fence(syncobjs[i], NULL); + } +} + +struct msm_syncobj_post_dep * +msm_syncobj_parse_post_deps(struct drm_device *dev, + struct drm_file *file, + uint64_t syncobjs_addr, + uint32_t nr_syncobjs, + size_t syncobj_stride) +{ + struct msm_syncobj_post_dep *post_deps; + struct drm_msm_syncobj syncobj_desc = {0}; + int ret = 0; + uint32_t i, j; + + post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (!post_deps) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nr_syncobjs; ++i) { + uint64_t address = syncobjs_addr + i * syncobj_stride; + + if (copy_from_user(&syncobj_desc, + u64_to_user_ptr(address), + min(syncobj_stride, sizeof(syncobj_desc)))) { + ret = -EFAULT; + break; + } + + post_deps[i].point = syncobj_desc.point; + + if (syncobj_desc.flags) { + ret = UERR(EINVAL, dev, "invalid syncobj flags"); + break; + } + + if (syncobj_desc.point) { + if (!drm_core_check_feature(dev, + DRIVER_SYNCOBJ_TIMELINE)) { + ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported"); + break; + } + + post_deps[i].chain = dma_fence_chain_alloc(); + if (!post_deps[i].chain) { + ret = -ENOMEM; + break; + } + } + + post_deps[i].syncobj = + drm_syncobj_find(file, syncobj_desc.handle); + if (!post_deps[i].syncobj) { + ret = UERR(EINVAL, dev, "invalid syncobj handle"); + break; + } + } + + if (ret) { + for (j = 0; j <= i; ++j) { + dma_fence_chain_free(post_deps[j].chain); + if 
(post_deps[j].syncobj) + drm_syncobj_put(post_deps[j].syncobj); + } + + kfree(post_deps); + return ERR_PTR(ret); + } + + return post_deps; +} + +void +msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps, + uint32_t count, struct dma_fence *fence) +{ + uint32_t i; + + for (i = 0; post_deps && i < count; ++i) { + if (post_deps[i].chain) { + drm_syncobj_add_point(post_deps[i].syncobj, + post_deps[i].chain, + fence, post_deps[i].point); + post_deps[i].chain = NULL; + } else { + drm_syncobj_replace_fence(post_deps[i].syncobj, + fence); + } + } +} diff --git a/drivers/gpu/drm/msm/msm_syncobj.h b/drivers/gpu/drm/msm/msm_syncobj.h new file mode 100644 index 000000000000..bcaa15d01da0 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_syncobj.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2020 Google, Inc */ + +#ifndef __MSM_GEM_SYNCOBJ_H__ +#define __MSM_GEM_SYNCOBJ_H__ + +#include "drm/drm_device.h" +#include "drm/drm_syncobj.h" +#include "drm/gpu_scheduler.h" + +struct msm_syncobj_post_dep { + struct drm_syncobj *syncobj; + uint64_t point; + struct dma_fence_chain *chain; +}; + +struct drm_syncobj ** +msm_syncobj_parse_deps(struct drm_device *dev, + struct drm_sched_job *job, + struct drm_file *file, + uint64_t in_syncobjs_addr, + uint32_t nr_in_syncobjs, + size_t syncobj_stride); + +void msm_syncobj_reset(struct drm_syncobj **syncobjs, uint32_t nr_syncobjs); + +struct msm_syncobj_post_dep * +msm_syncobj_parse_post_deps(struct drm_device *dev, + struct drm_file *file, + uint64_t syncobjs_addr, + uint32_t nr_syncobjs, + size_t syncobj_stride); + +void msm_syncobj_process_post_deps(struct msm_syncobj_post_dep *post_deps, + uint32_t count, struct dma_fence *fence); + +#endif /* __MSM_GEM_SYNCOBJ_H__ */ diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 1bccc347945c..2c2fc4b284d0 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -220,6 +220,17 @@ struct drm_msm_gem_cpu_fini { * Cmdstream Submission: */
+#define MSM_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */ +#define MSM_SYNCOBJ_FLAGS ( \ + MSM_SYNCOBJ_RESET | \ + 0) + +struct drm_msm_syncobj { + __u32 handle; /* in, syncobj handle. */ + __u32 flags; /* in, from MSM_SYNCOBJ_FLAGS */ + __u64 point; /* in, timepoint for timeline syncobjs. */ +}; + /* The value written into the cmdstream is logically: * * ((relocbuf->gpuaddr + reloc_offset) << shift) | or @@ -309,17 +320,6 @@ struct drm_msm_gem_submit_bo { MSM_SUBMIT_FENCE_SN_IN | \ 0)
-#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */ -#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \ - MSM_SUBMIT_SYNCOBJ_RESET | \ - 0) - -struct drm_msm_gem_submit_syncobj { - __u32 handle; /* in, syncobj handle. */ - __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */ - __u64 point; /* in, timepoint for timeline syncobjs. */ -}; - /* Each cmdstream submit consists of a table of buffers involved, and * one or more cmdstream buffers. This allows for conditional execution * (context-restore), and IB buffers needed for per tile/bin draw cmds. @@ -333,8 +333,8 @@ struct drm_msm_gem_submit { __u64 cmds; /* in, ptr to array of submit_cmd's */ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ __u32 queueid; /* in, submitqueue id */ - __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */ - __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */ + __u64 in_syncobjs; /* in, ptr to array of drm_msm_syncobj */ + __u64 out_syncobjs; /* in, ptr to array of drm_msm_syncobj */ __u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */ __u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */ __u32 syncobj_stride; /* in, stride of syncobj arrays. */
From: Rob Clark <robdclark@chromium.org>
This submitqueue type isn't tied to a hw ringbuffer, but instead executes on the CPU for performing async VM_BIND ops.
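From userspace, such a queue is created with the normal SUBMITQUEUE_NEW ioctl, just with the new flag set. A minimal sketch (prio must be zero, and the context must have opted in to VM_BIND first):

/* Create a submitqueue dedicated to VM_BIND ops. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int create_vm_bind_queue(int fd, uint32_t *queue_id)
{
	struct drm_msm_submitqueue req = {
		.flags = MSM_SUBMITQUEUE_VM_BIND,
		.prio  = 0,
	};
	int ret = ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req);

	if (!ret)
		*queue_id = req.id;

	return ret;
}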
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/msm_gem.h         | 12 +++++
 drivers/gpu/drm/msm/msm_gem_submit.c  | 60 +++++++++++++++++++---
 drivers/gpu/drm/msm/msm_gem_vma.c     | 71 +++++++++++++++++++++++++++
 drivers/gpu/drm/msm/msm_gpu.h         |  3 ++
 drivers/gpu/drm/msm/msm_submitqueue.c | 57 ++++++++++++++++-----
 include/uapi/drm/msm_drm.h            |  9 +++-
 6 files changed, 191 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index f7b85084e228..c1581bd4b5fd 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -53,6 +53,13 @@ struct msm_gem_vm { /** @base: Inherit from drm_gpuvm. */ struct drm_gpuvm base;
+ /** + * @sched: Scheduler used for asynchronous VM_BIND request. + * + * Unused for kernel managed VMs (where all operations are synchronous). + */ + struct drm_gpu_scheduler sched; + /** * @mm: Memory management for kernel managed VA allocations * @@ -71,6 +78,9 @@ struct msm_gem_vm { */ struct pid *pid;
+ /** @last_fence: Fence for last pending work scheduled on the VM */ + struct dma_fence *last_fence; + /** @faults: the number of GPU hangs associated with this address space */ int faults;
@@ -100,6 +110,8 @@ struct drm_gpuvm * msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, u64 va_start, u64 va_size, bool managed);
+void msm_gem_vm_close(struct drm_gpuvm *gpuvm); + struct msm_fence_context;
#define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 552ff2d57294..2e7f8352fa40 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -4,6 +4,7 @@ * Author: Rob Clark robdclark@gmail.com */
+#include <linux/dma-fence-unwrap.h> #include <linux/file.h> #include <linux/sync_file.h> #include <linux/uaccess.h> @@ -249,30 +250,43 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, static int submit_lock_objects(struct msm_gem_submit *submit) { unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT; + struct drm_exec *exec = &submit->exec; int ret;
-// TODO need to add vm_bind path which locks vm resv + external objs drm_exec_init(&submit->exec, flags, submit->nr_bos);
+ if (msm_context_is_vmbind(submit->queue->ctx)) { + drm_exec_until_all_locked (&submit->exec) { + ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + + ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + } + + return 0; + } + drm_exec_until_all_locked (&submit->exec) { ret = drm_exec_lock_obj(&submit->exec, drm_gpuvm_resv_obj(submit->vm)); drm_exec_retry_on_contention(&submit->exec); if (ret) - goto error; + return ret; for (unsigned i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = submit->bos[i].obj; ret = drm_exec_prepare_obj(&submit->exec, obj, 1); drm_exec_retry_on_contention(&submit->exec); if (ret) - goto error; + return ret; } }
return 0; - -error: - return ret; }
static int submit_fence_sync(struct msm_gem_submit *submit) @@ -357,9 +371,18 @@ static void submit_unpin_objects(struct msm_gem_submit *submit)
static void submit_attach_object_fences(struct msm_gem_submit *submit) { - int i; + struct msm_gem_vm *vm = to_msm_vm(submit->vm); + struct dma_fence *last_fence; + + if (msm_context_is_vmbind(submit->queue->ctx)) { + drm_gpuvm_resv_add_fence(submit->vm, &submit->exec, + submit->user_fence, + DMA_RESV_USAGE_BOOKKEEP, + DMA_RESV_USAGE_BOOKKEEP); + return; + }
- for (i = 0; i < submit->nr_bos; i++) { + for (unsigned i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = submit->bos[i].obj;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) @@ -369,6 +392,10 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) dma_resv_add_fence(obj->resv, submit->user_fence, DMA_RESV_USAGE_READ); } + + last_fence = vm->last_fence; + vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); + dma_fence_put(last_fence); }
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, @@ -522,6 +549,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (!queue) return -ENOENT;
+ if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) { + ret = UERR(EINVAL, dev, "Invalid queue type"); + goto out_post_unlock; + } + ring = gpu->rb[queue->ring_nr];
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { @@ -608,6 +640,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out;
+ if (msm_context_is_vmbind(ctx)) { + /* + * If we are not using VM_BIND, submit_pin_vmas() will validate + * just the BOs attached to the submit. In that case we don't + * need to validate the _entire_ vm, because userspace tracked + * what BOs are associated with the submit. + */ + ret = drm_gpuvm_validate(submit->vm, &submit->exec); + if (ret) + goto out; + } + for (i = 0; i < args->nr_cmds; i++) { struct drm_gem_object *obj; uint64_t iova; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 265c82abc658..5b8769e152c9 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -16,6 +16,7 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm) drm_mm_takedown(&vm->mm); if (vm->mmu) vm->mmu->funcs->destroy(vm->mmu); + dma_fence_put(vm->last_fence); put_pid(vm->pid); kfree(vm); } @@ -153,6 +154,9 @@ static const struct drm_gpuvm_ops msm_gpuvm_ops = { .vm_free = msm_gem_vm_free, };
+static const struct drm_sched_backend_ops msm_vm_bind_ops = { +}; + /** * msm_gem_vm_create() - Create and initialize a &msm_gem_vm * @drm: the drm device @@ -191,6 +195,21 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, goto err_free_vm; }
+ if (!managed) { + struct drm_sched_init_args args = { + .ops = &msm_vm_bind_ops, + .num_rqs = 1, + .credit_limit = 1, + .timeout = MAX_SCHEDULE_TIMEOUT, + .name = "msm-vm-bind", + .dev = drm->dev, + }; + + ret = drm_sched_init(&vm->sched, &args); + if (ret) + goto err_free_dummy; + } + drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem, va_start, va_size, 0, 0, &msm_gpuvm_ops); drm_gem_object_put(dummy_gem); @@ -202,8 +221,60 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
return &vm->base;
+err_free_dummy: + drm_gem_object_put(dummy_gem); + err_free_vm: kfree(vm); return ERR_PTR(ret);
} + +/** + * msm_gem_vm_close() - Close a VM + * @gpuvm: The VM to close + * + * Called when the drm device file is closed, to tear down VM related resources + * (which will drop refcounts to GEM objects that were still mapped into the + * VM at the time). + */ +void +msm_gem_vm_close(struct drm_gpuvm *gpuvm) +{ + struct msm_gem_vm *vm = to_msm_vm(gpuvm); + struct drm_gpuva *vma, *tmp; + + /* + * For kernel managed VMs, the VMAs are torn down when the handle is + * closed, so nothing more to do. + */ + if (vm->managed) + return; + + if (vm->last_fence) + dma_fence_wait(vm->last_fence, false); + + /* Kill the scheduler now, so we aren't racing with it for cleanup: */ + drm_sched_stop(&vm->sched, NULL); + drm_sched_fini(&vm->sched); + + /* Tear down any remaining mappings: */ + dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL); + drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) { + struct drm_gem_object *obj = vma->gem.obj; + + if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { + drm_gem_object_get(obj); + msm_gem_lock(obj); + } + + msm_gem_vma_unmap(vma); + msm_gem_vma_close(vma); + + if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { + msm_gem_unlock(obj); + drm_gem_object_put(obj); + } + } + dma_resv_unlock(drm_gpuvm_resv(gpuvm)); +} diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 20f52d9636b0..49c8862ada13 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -562,6 +562,9 @@ struct msm_gpu_submitqueue { struct mutex lock; struct kref ref; struct drm_sched_entity *entity; + + /** @_vm_bind_entity: used for @entity pointer for VM_BIND queues */ + struct drm_sched_entity _vm_bind_entity[0]; };
struct msm_gpu_state_bo { diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 8ced49c7557b..99ab780d5d7b 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -72,6 +72,9 @@ void msm_submitqueue_destroy(struct kref *kref)
idr_destroy(&queue->fence_idr);
+ if (queue->entity == &queue->_vm_bind_entity[0]) + drm_sched_entity_destroy(queue->entity); + msm_context_put(queue->ctx);
kfree(queue); @@ -115,6 +118,11 @@ void msm_submitqueue_close(struct msm_context *ctx) list_del(&entry->node); msm_submitqueue_put(entry); } + + if (!ctx->vm) + return; + + msm_gem_vm_close(ctx->vm); }
static struct drm_sched_entity * @@ -160,8 +168,6 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx, struct msm_drm_private *priv = drm->dev_private; struct msm_gpu_submitqueue *queue; enum drm_sched_priority sched_prio; - extern int enable_preemption; - bool preemption_supported; unsigned ring_nr; int ret;
@@ -171,26 +177,53 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx, if (!priv->gpu) return -ENODEV;
- preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0; + if (flags & MSM_SUBMITQUEUE_VM_BIND) { + unsigned sz;
- if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported) - return -EINVAL; + /* Not allowed for kernel managed VMs (ie. kernel allocs VA) */ + if (!msm_context_is_vmbind(ctx)) + return -EINVAL;
- ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio); - if (ret) - return ret; + if (prio) + return -EINVAL; + + sz = struct_size(queue, _vm_bind_entity, 1); + queue = kzalloc(sz, GFP_KERNEL); + } else { + extern int enable_preemption; + bool preemption_supported = + priv->gpu->nr_rings == 1 && enable_preemption != 0; + + if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported) + return -EINVAL;
- queue = kzalloc(sizeof(*queue), GFP_KERNEL); + ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio); + if (ret) + return ret; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + }
if (!queue) return -ENOMEM;
kref_init(&queue->ref); queue->flags = flags; - queue->ring_nr = ring_nr;
- queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr], - ring_nr, sched_prio); + if (flags & MSM_SUBMITQUEUE_VM_BIND) { + struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched; + + queue->entity = &queue->_vm_bind_entity[0]; + + drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL, + &sched, 1, NULL); + } else { + queue->ring_nr = ring_nr; + + queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr], + ring_nr, sched_prio); + } + if (IS_ERR(queue->entity)) { ret = PTR_ERR(queue->entity); kfree(queue); diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 2c2fc4b284d0..6d6cd1219926 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -385,12 +385,19 @@ struct drm_msm_gem_madvise { /* * Draw queues allow the user to set specific submission parameter. Command * submissions specify a specific submitqueue to use. ID 0 is reserved for - * backwards compatibility as a "default" submitqueue + * backwards compatibility as a "default" submitqueue. + * + * Because VM_BIND async updates happen on the CPU, they must run on a + * virtual queue created with the flag MSM_SUBMITQUEUE_VM_BIND. If we had + * a way to do pgtable updates on the GPU, we could drop this restriction. */
#define MSM_SUBMITQUEUE_ALLOW_PREEMPT 0x00000001 +#define MSM_SUBMITQUEUE_VM_BIND 0x00000002 /* virtual queue for VM_BIND ops */ + #define MSM_SUBMITQUEUE_FLAGS ( \ MSM_SUBMITQUEUE_ALLOW_PREEMPT | \ + MSM_SUBMITQUEUE_VM_BIND | \ 0)
/*
From: Rob Clark <robdclark@chromium.org>
Add a VM_BIND ioctl for binding/unbinding buffers into a VM. This is only supported if userspace has opted in to MSM_PARAM_EN_VM_BIND.
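For completeness, the opt-in itself is just a one-time SET_PARAM on the context. A minimal sketch (MSM_PARAM_EN_VM_BIND comes from the "drm/msm: Add opt-in for VM_BIND" patch earlier in this series):

/* Opt the context in to a userspace-managed VM, enabling VM_BIND. */
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int enable_vm_bind(int fd)
{
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_EN_VM_BIND,
		.value = 1,
	};

	return ioctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
}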
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
 drivers/gpu/drm/msm/msm_drv.c        |   1 +
 drivers/gpu/drm/msm/msm_drv.h        |   4 +-
 drivers/gpu/drm/msm/msm_gem.c        |  40 +-
 drivers/gpu/drm/msm/msm_gem.h        |   4 +
 drivers/gpu/drm/msm/msm_gem_submit.c |  22 +-
 drivers/gpu/drm/msm/msm_gem_vma.c    | 987 ++++++++++++++++++++++++++-
 include/uapi/drm/msm_drm.h           |  74 +-
 7 files changed, 1097 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 49e4425c3caf..a63442039e22 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -790,6 +790,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_VM_BIND, msm_ioctl_vm_bind, DRM_RENDER_ALLOW), };
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file) diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index b0add236cbb3..33240afc6365 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -232,7 +232,9 @@ struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev); bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data, - struct drm_file *file); + struct drm_file *file); +int msm_ioctl_vm_bind(struct drm_device *dev, void *data, + struct drm_file *file);
#ifdef CONFIG_DEBUG_FS unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b4b299e3f3d3..9d60b38ba58a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -233,8 +233,7 @@ static void put_pages(struct drm_gem_object *obj) } }
-static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, - unsigned madv) +struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv) { struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -1036,18 +1035,37 @@ static void msm_gem_free_object(struct drm_gem_object *obj) /* * We need to lock any VMs the object is still attached to, but not * the object itself (see explaination in msm_gem_assert_locked()), - * so just open-code this special case: + * so just open-code this special case. + * + * Note that we skip the dance if we aren't attached to any VM. This + * is load bearing. The driver needs to support two usage models: + * + * 1. Legacy kernel managed VM: Userspace expects the VMA's to be + * implicitly torn down when the object is freed, the VMA's do + * not hold a hard reference to the BO. + * + * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the + * BO. This can be dropped when the VM is closed and it's associated + * VMAs are torn down. (See msm_gem_vm_close()). + * + * In the latter case the last reference to a BO can be dropped while + * we already have the VM locked. It would have already been removed + * from the gpuva list, but lockdep doesn't know that. Or understand + * the differences between the two usage models. */ - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked (&exec) { - struct drm_gpuvm_bo *vm_bo; - drm_gem_for_each_gpuvm_bo (vm_bo, obj) { - drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(vm_bo->vm)); - drm_exec_retry_on_contention(&exec); + if (!list_empty(&obj->gpuva.list)) { + drm_exec_init(&exec, 0, 0); + drm_exec_until_all_locked (&exec) { + struct drm_gpuvm_bo *vm_bo; + drm_gem_for_each_gpuvm_bo (vm_bo, obj) { + drm_exec_lock_obj(&exec, + drm_gpuvm_resv_obj(vm_bo->vm)); + drm_exec_retry_on_contention(&exec); + } } + put_iova_spaces(obj, NULL, true); + drm_exec_fini(&exec); /* drop locks */ } - put_iova_spaces(obj, NULL, true); - drm_exec_fini(&exec); /* drop locks */
if (obj->import_attach) { GEM_WARN_ON(msm_obj->vaddr); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 8ad25927c604..bfeb0f584ae5 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -73,6 +73,9 @@ struct msm_gem_vm { /** @mmu: The mmu object which manages the pgtables */ struct msm_mmu *mmu;
+ /** @mmu_lock: Protects access to the mmu */ + struct mutex mmu_lock; + /** * @pid: For address spaces associated with a specific process, this * will be non-NULL: @@ -205,6 +208,7 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm, uint64_t *iova); void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm); void msm_gem_pin_obj_locked(struct drm_gem_object *obj); +struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv); struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj); void msm_gem_unpin_pages_locked(struct drm_gem_object *obj); int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 2e7f8352fa40..7cea8743ff68 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -184,6 +184,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, static int submit_lookup_cmds(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { + struct msm_context *ctx = file->driver_priv; unsigned i; size_t sz; int ret = 0; @@ -215,6 +216,20 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, goto out; }
+ if (msm_context_is_vmbind(ctx)) { + if (submit_cmd.nr_relocs) { + ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero"); + goto out; + } + + if (submit_cmd.submit_idx || submit_cmd.submit_offset) { + ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero"); + goto out; + } + + submit->cmd[i].iova = submit_cmd.iova; + } + submit->cmd[i].type = submit_cmd.type; submit->cmd[i].size = submit_cmd.size / 4; submit->cmd[i].offset = submit_cmd.submit_offset / 4; @@ -517,6 +532,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_ringbuffer *ring; struct msm_syncobj_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; + unsigned cmds_to_parse; int out_fence_fd = -1; unsigned i; int ret; @@ -652,7 +668,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; }
- for (i = 0; i < args->nr_cmds; i++) { + cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds; + + for (i = 0; i < cmds_to_parse; i++) { struct drm_gem_object *obj; uint64_t iova;
@@ -683,7 +701,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; }
- submit->nr_cmds = i; + submit->nr_cmds = args->nr_cmds;
idr_preload(GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index f3903825e0b6..69d0d423900e 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -4,9 +4,16 @@ * Author: Rob Clark robdclark@gmail.com */
+#include "drm/drm_file.h" +#include "drm/msm_drm.h" +#include "linux/file.h" +#include "linux/sync_file.h" + #include "msm_drv.h" #include "msm_gem.h" +#include "msm_gpu.h" #include "msm_mmu.h" +#include "msm_syncobj.h"
#define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
@@ -34,8 +41,95 @@ struct msm_vm_unmap_op { uint64_t iova; /** @range: size of region to unmap */ uint64_t range; + /** + * @obj: backing object for pages to be unmapped + * + * Async unmap ops must hold a reference to the original GEM object + * backing the mapping that will be unmapped. This ensures that the + * pages backing the mapping are not freed before the mapping is torn + * down. + */ + struct drm_gem_object *obj; +}; + +/** + * struct msm_vma_op - A MAP or UNMAP operation + */ +struct msm_vm_op { + /** @op: The operation type */ + enum { + MSM_VM_OP_MAP, + MSM_VM_OP_UNMAP, + } op; + union { + /** @map: Parameters used if op == MSM_VMA_OP_MAP */ + struct msm_vm_map_op map; + /** @unmap: Parameters used if op == MSM_VMA_OP_UNMAP */ + struct msm_vm_unmap_op unmap; + }; + /** @node: list head in msm_vm_bind_job::vm_ops */ + struct list_head node; };
+/** + * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl + * + * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL) + * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP) + * which are applied to the pgtables asynchronously. For example a userspace + * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP + * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping. + */ +struct msm_vm_bind_job { + /** @base: base class for drm_sched jobs */ + struct drm_sched_job base; + /** @vm: The VM being operated on */ + struct drm_gpuvm *vm; + /** @fence: The fence that is signaled when job completes */ + struct dma_fence *fence; + /** @queue: The queue that the job runs on */ + struct msm_gpu_submitqueue *queue; + /** @prealloc: Tracking for pre-allocated MMU pgtable pages */ + struct msm_mmu_prealloc prealloc; + /** @vm_ops: a list of struct msm_vm_op */ + struct list_head vm_ops; + /** @bos_pinned: are the GEM objects being bound pinned? */ + bool bos_pinned; + /** @nr_ops: the number of userspace requested ops */ + unsigned int nr_ops; + /** + * @ops: the userspace requested ops + * + * The userspace requested ops are copied/parsed and validated + * before we start applying the updates to try to do as much up- + * front error checking as possible, to avoid the VM being in an + * undefined state due to partially executed VM_BIND. + * + * This table also serves to hold a reference to the backing GEM + * objects. + */ + struct msm_vm_bind_op { + uint32_t op; + uint32_t flags; + union { + struct drm_gem_object *obj; + uint32_t handle; + }; + uint64_t obj_offset; + uint64_t iova; + uint64_t range; + } ops[]; +}; + +#define job_foreach_bo(obj, _job) \ + for (unsigned i = 0; i < (_job)->nr_ops; i++) \ + if ((obj = (_job)->ops[i].obj)) + +static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job) +{ + return container_of(job, struct msm_vm_bind_job, base); +} + static void msm_gem_vm_free(struct drm_gpuvm *gpuvm) { @@ -49,34 +143,52 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm) kfree(vm); }
-static void +static int vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) { + if (!vm->managed) + lockdep_assert_held(&vm->mmu_lock); + vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
- vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range); + return vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range); }
/* Actually unmap memory for the vma */ void msm_gem_vma_unmap(struct drm_gpuva *vma) { + struct msm_gem_vm *vm = to_msm_vm(vma->vm); struct msm_gem_vma *msm_vma = to_msm_vma(vma);
/* Don't do anything if the memory isn't mapped */ if (!msm_vma->mapped) return;
- vm_unmap_op(to_msm_vm(vma->vm), &(struct msm_vm_unmap_op){ + /* + * The mmu_lock is only needed when preallocation is used. But + * in that case we don't need to worry about recursion into + * shrinker + */ + if (!vm->managed) + mutex_lock(&vm->mmu_lock); + + vm_unmap_op(vm, &(struct msm_vm_unmap_op){ .iova = vma->va.addr, .range = vma->va.range, });
+ if (!vm->managed) + mutex_unlock(&vm->mmu_lock); + msm_vma->mapped = false; }
static int vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) { + if (!vm->managed) + lockdep_assert_held(&vm->mmu_lock); + vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset, @@ -87,6 +199,7 @@ vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt) { + struct msm_gem_vm *vm = to_msm_vm(vma->vm); struct msm_gem_vma *msm_vma = to_msm_vma(vma); int ret;
@@ -98,6 +211,14 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
msm_vma->mapped = true;
+ /* + * The mmu_lock is only needed when preallocation is used. But + * in that case we don't need to worry about recursion into + * shrinker + */ + if (!vm->managed) + mutex_lock(&vm->mmu_lock); + /* * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold * a lock across map/unmap which is also used in the job_run() @@ -107,16 +228,19 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt) * Revisit this if we can come up with a scheme to pre-alloc pages * for the pgtable in map/unmap ops. */ - ret = vm_map_op(to_msm_vm(vma->vm), &(struct msm_vm_map_op){ + ret = vm_map_op(vm, &(struct msm_vm_map_op){ .iova = vma->va.addr, .range = vma->va.range, .offset = vma->gem.offset, .sgt = sgt, .prot = prot, }); - if (ret) { + + if (!vm->managed) + mutex_unlock(&vm->mmu_lock); + + if (ret) msm_vma->mapped = false; - }
return ret; } @@ -131,6 +255,9 @@ void msm_gem_vma_close(struct drm_gpuva *vma)
drm_gpuvm_resv_assert_held(&vm->base);
+ if (vma->gem.obj) + msm_gem_assert_locked(vma->gem.obj); + if (vma->va.addr && vm->managed) drm_mm_remove_node(&msm_vma->node);
@@ -158,6 +285,7 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
if (vm->managed) { BUG_ON(offset != 0); + BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */ ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, obj->size, PAGE_SIZE, 0, range_start, range_end, 0); @@ -169,7 +297,8 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj, range_end = range_start + obj->size; }
- GEM_WARN_ON((range_end - range_start) > obj->size); + if (obj) + GEM_WARN_ON((range_end - range_start) > obj->size);
drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset); vma->mapped = false; @@ -178,6 +307,9 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj, if (ret) goto err_free_range;
+ if (!obj) + return &vma->base; + vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj); if (IS_ERR(vm_bo)) { ret = PTR_ERR(vm_bo); @@ -199,11 +331,290 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj, return ERR_PTR(ret); }
+static int +msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) +{ + struct drm_gem_object *obj = vm_bo->obj; + struct drm_gpuva *vma; + int ret; + + msm_gem_assert_locked(obj); + + drm_gpuvm_bo_for_each_va (vma, vm_bo) { + ret = msm_gem_pin_vma_locked(obj, vma); + if (ret) + return ret; + } + + return 0; +} + +struct op_arg { + unsigned flags; + struct msm_vm_bind_job *job; +}; + +static void +vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op) +{ + struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL); + *op = _op; + list_add_tail(&op->node, &arg->job->vm_ops); +} + +static struct drm_gpuva * +vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op) +{ + return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset, + op->va.addr, op->va.addr + op->va.range); +} + +static void +map_vma(struct drm_gpuva *vma, void *arg) +{ + struct drm_gem_object *obj = vma->gem.obj; + struct sg_table *sgt; + unsigned prot; + + if (obj) { + sgt = to_msm_bo(obj)->sgt; + prot = msm_gem_prot(obj); + } else { + sgt = NULL; + prot = IOMMU_READ | IOMMU_WRITE; + } + + vm_op_enqueue(arg, (struct msm_vm_op){ + .op = MSM_VM_OP_MAP, + .map = { + .sgt = sgt, + .iova = vma->va.addr, + .range = vma->va.range, + .offset = vma->gem.offset, + .prot = prot, + }, + }); + + to_msm_vma(vma)->mapped = true; +} + +static int +msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg) +{ + struct drm_gpuva *vma; + + vma = vma_from_op(arg, &op->map); + if (WARN_ON(IS_ERR(vma))) + return PTR_ERR(vma); + + vm_dbg("%p:%p: %016llx %016llx", vma->vm, vma, vma->va.addr, vma->va.range); + + vma->flags = ((struct op_arg *)arg)->flags; + + map_vma(vma, arg); + + return 0; +} + +static int +msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg) +{ + struct drm_gpuvm *vm = ((struct op_arg *)arg)->job->vm; + struct drm_gpuva *orig_vma = op->remap.unmap->va; + struct drm_gpuva *prev_vma = NULL, *next_vma = NULL; + uint64_t unmap_start, unmap_range; + unsigned flags; + + vm_dbg("orig_vma: %p:%p: %016llx %016llx", vm, orig_vma, orig_vma->va.addr, orig_vma->va.range); + + /* + * We are going to need at least some of the orig_vma mapped, the + * easier thing is to just map the whole thing and then unmap the + * unmap_range, instead of trying to optimize for this case + */ + if (!to_msm_vma(orig_vma)->mapped) + map_vma(orig_vma, arg); + + drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range); + + if (orig_vma->gem.obj) + drm_gem_object_get(orig_vma->gem.obj); + + vm_op_enqueue(arg, (struct msm_vm_op){ + .op = MSM_VM_OP_UNMAP, + .unmap = { + .iova = unmap_start, + .range = unmap_range, + .obj = orig_vma->gem.obj, + }, + }); + + /* + * Part of this GEM obj is still mapped, but we're going to kill the + * existing VMA and replace it with one or two new ones (ie. two if + * the unmapped range is in the middle of the existing (unmap) VMA). 
+ * So just set the state to unmapped: + */ + to_msm_vma(orig_vma)->mapped = false; + + /* + * The prev_vma and/or next_vma are replacing the unmapped vma, and + * therefore should preserve it's flags: + */ + flags = orig_vma->flags; + + msm_gem_vma_close(orig_vma); + + if (op->remap.prev) { + prev_vma = vma_from_op(arg, op->remap.prev); + if (WARN_ON(IS_ERR(prev_vma))) + return PTR_ERR(prev_vma); + + vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range); + to_msm_vma(prev_vma)->mapped = true; + prev_vma->flags = flags; + } + + if (op->remap.next) { + next_vma = vma_from_op(arg, op->remap.next); + if (WARN_ON(IS_ERR(next_vma))) + return PTR_ERR(next_vma); + + vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range); + to_msm_vma(next_vma)->mapped = true; + next_vma->flags = flags; + } + + return 0; +} + +static int +msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg) +{ + struct drm_gpuva *vma = op->unmap.va; + struct msm_gem_vma *msm_vma = to_msm_vma(vma); + + vm_dbg("%p:%p: %016llx %016llx", vma->vm, vma, vma->va.addr, vma->va.range); + + if (!msm_vma->mapped) + goto out_close; + + if (vma->gem.obj) + drm_gem_object_get(vma->gem.obj); + + vm_op_enqueue(arg, (struct msm_vm_op){ + .op = MSM_VM_OP_UNMAP, + .unmap = { + .iova = vma->va.addr, + .range = vma->va.range, + .obj = vma->gem.obj, + }, + }); + + msm_vma->mapped = false; + +out_close: + msm_gem_vma_close(vma); + + return 0; +} + static const struct drm_gpuvm_ops msm_gpuvm_ops = { .vm_free = msm_gem_vm_free, + .vm_bo_validate = msm_gem_vm_bo_validate, + .sm_step_map = msm_gem_vm_sm_step_map, + .sm_step_remap = msm_gem_vm_sm_step_remap, + .sm_step_unmap = msm_gem_vm_sm_step_unmap, };
+static struct dma_fence * +msm_vma_job_run(struct drm_sched_job *_job) +{ + struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job); + struct msm_gem_vm *vm = to_msm_vm(job->vm); + struct drm_gem_object *obj; + int ret = 0; + + vm_dbg(""); + + mutex_lock(&vm->mmu_lock); + vm->mmu->prealloc = &job->prealloc; + + while (!list_empty(&job->vm_ops)) { + struct msm_vm_op *op = + list_first_entry(&job->vm_ops, struct msm_vm_op, node); + + switch (op->op) { + case MSM_VM_OP_MAP: + /* + * On error, stop trying to map new things.. but we + * still want to process the unmaps (or in particular, + * the drm_gem_object_put()s) + */ + if (!ret) + ret = vm_map_op(vm, &op->map); + break; + case MSM_VM_OP_UNMAP: + if (!ret) + ret = vm_unmap_op(vm, &op->unmap); + drm_gem_object_put(op->unmap.obj); + break; + } + list_del(&op->node); + kfree(op); + } + + /* + * We failed to perform at least _some_ of the pgtable updates, so + * now the VM is in an undefined state. Game over! + */ + if (ret) + vm->unusable = true; + + vm->mmu->prealloc = NULL; + mutex_unlock(&vm->mmu_lock); + + job_foreach_bo (obj, job) { + msm_gem_lock(obj); + msm_gem_unpin_locked(obj); + msm_gem_unlock(obj); + } + + /* VM_BIND ops are synchronous, so no fence to wait on: */ + return NULL; +} + +static void +msm_vma_job_free(struct drm_sched_job *_job) +{ + struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job); + struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu; + struct drm_gem_object *obj; + + mmu->funcs->prealloc_cleanup(mmu, &job->prealloc); + + drm_sched_job_cleanup(_job); + + job_foreach_bo (obj, job) + drm_gem_object_put(obj); + + msm_submitqueue_put(job->queue); + dma_fence_put(job->fence); + + /* In error paths, we could have unexecuted ops: */ + while (!list_empty(&job->vm_ops)) { + struct msm_vm_op *op = + list_first_entry(&job->vm_ops, struct msm_vm_op, node); + list_del(&op->node); + kfree(op); + } + + kfree(job); +} + static const struct drm_sched_backend_ops msm_vm_bind_ops = { + .run_job = msm_vma_job_run, + .free_job = msm_vma_job_free };
/** @@ -264,6 +675,7 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, drm_gem_object_put(dummy_gem);
vm->mmu = mmu; + mutex_init(&vm->mmu_lock); vm->managed = managed;
drm_mm_init(&vm->mm, va_start, va_size); @@ -276,7 +688,6 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, err_free_vm: kfree(vm); return ERR_PTR(ret); - }
/** @@ -292,6 +703,7 @@ msm_gem_vm_close(struct drm_gpuvm *gpuvm) { struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuva *vma, *tmp; + struct drm_exec exec;
/* * For kernel managed VMs, the VMAs are torn down when the handle is @@ -308,22 +720,557 @@ msm_gem_vm_close(struct drm_gpuvm *gpuvm) drm_sched_fini(&vm->sched);
/* Tear down any remaining mappings: */ - dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL); - drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) { - struct drm_gem_object *obj = vma->gem.obj; + drm_exec_init(&exec, 0, 2); + drm_exec_until_all_locked (&exec) { + drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm)); + drm_exec_retry_on_contention(&exec); + + drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) { + struct drm_gem_object *obj = vma->gem.obj; + + /* + * MSM_BO_NO_SHARE objects share the same resv as the + * VM, in which case the obj is already locked: + */ + if (obj && (obj->resv == drm_gpuvm_resv(gpuvm))) + obj = NULL; + + if (obj) { + drm_exec_lock_obj(&exec, obj); + drm_exec_retry_on_contention(&exec); + } + + msm_gem_vma_unmap(vma); + msm_gem_vma_close(vma); + + if (obj) { + drm_exec_unlock_obj(&exec, obj); + } + } + } + drm_exec_fini(&exec); +} + + +static struct msm_vm_bind_job * +vm_bind_job_create(struct drm_device *dev, struct msm_gpu *gpu, + struct msm_gpu_submitqueue *queue, uint32_t nr_ops) +{ + struct msm_vm_bind_job *job; + uint64_t sz; + int ret; + + sz = struct_size(job, ops, nr_ops); + + if (sz > SIZE_MAX) + return ERR_PTR(-ENOMEM); + + job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); + if (!job) + return ERR_PTR(-ENOMEM); + + ret = drm_sched_job_init(&job->base, queue->entity, 1, queue); + if (ret) { + kfree(job); + return ERR_PTR(ret); + } + + job->vm = msm_context_vm(dev, queue->ctx); + job->queue = queue; + INIT_LIST_HEAD(&job->vm_ops); + + return job; +} + +static bool invalid_alignment(uint64_t addr) +{ + /* + * Technically this is about GPU alignment, not CPU alignment. But + * I've not seen any qcom SoC where the SMMU does not support the + * CPU's smallest page size. + */ + return !PAGE_ALIGNED(addr); +}
- if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { - drm_gem_object_get(obj); - msm_gem_lock(obj); +static int +lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op) +{ + struct drm_device *dev = job->vm->drm; + struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu; + int i = job->nr_ops++; + int ret = 0; + + job->ops[i].op = op->op; + job->ops[i].handle = op->handle; + job->ops[i].obj_offset = op->obj_offset; + job->ops[i].iova = op->iova; + job->ops[i].range = op->range; + job->ops[i].flags = op->flags; + + if (op->flags & ~MSM_VM_BIND_OP_FLAGS) + ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags); + + if (invalid_alignment(op->iova)) + ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova); + + if (invalid_alignment(op->obj_offset)) + ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset); + + if (invalid_alignment(op->range)) + ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range); + + + /* + * MAP must specify a valid handle. But the handle MBZ for + * UNMAP or MAP_NULL. + */ + if (op->op == MSM_VM_BIND_OP_MAP) { + if (!op->handle) + ret = UERR(EINVAL, dev, "invalid handle\n"); + } else if (op->handle) { + ret = UERR(EINVAL, dev, "handle must be zero\n"); + } + + switch (op->op) { + case MSM_VM_BIND_OP_MAP: + case MSM_VM_BIND_OP_MAP_NULL: + mmu->funcs->prealloc_count(mmu, &job->prealloc, op->iova, op->range); + break; + case MSM_VM_BIND_OP_UNMAP: + break; + default: + ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op); + break; + } + + return ret; +} + +/* + * ioctl parsing, parameter validation, and GEM handle lookup + */ +static int +vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args, + struct drm_file *file, int *nr_bos) +{ + struct drm_device *dev = job->vm->drm; + int ret = 0; + int cnt = 0; + + if (args->nr_ops == 1) { + /* Single op case, the op is inlined: */ + ret = lookup_op(job, &args->op); + } else { + for (unsigned i = 0; i < args->nr_ops; i++) { + struct drm_msm_vm_bind_op op; + void __user *userptr = + u64_to_user_ptr(args->ops + (i * sizeof(op))); + + /* make sure we don't have garbage flags, in case we hit + * error path before flags is initialized: + */ + job->ops[i].flags = 0; + + if (copy_from_user(&op, userptr, sizeof(op))) { + ret = -EFAULT; + break; + } + + ret = lookup_op(job, &op); + if (ret) + break; } + } + + if (ret) { + job->nr_ops = 0; + goto out; + } + + spin_lock(&file->table_lock); + + for (unsigned i = 0; i < args->nr_ops; i++) { + struct drm_gem_object *obj;
- msm_gem_vma_unmap(vma); - msm_gem_vma_close(vma); + if (!job->ops[i].handle) { + job->ops[i].obj = NULL; + continue; + }
- if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { - msm_gem_unlock(obj); - drm_gem_object_put(obj); + /* + * normally use drm_gem_object_lookup(), but for bulk lookup + * all under single table_lock just hit object_idr directly: + */ + obj = idr_find(&file->object_idr, job->ops[i].handle); + if (!obj) { + ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i); + goto out_unlock; } + + drm_gem_object_get(obj); + + job->ops[i].obj = obj; + cnt++; } - dma_resv_unlock(drm_gpuvm_resv(gpuvm)); + + *nr_bos = cnt; + +out_unlock: + spin_unlock(&file->table_lock); + +out: + return ret; +} + +/* + * Lock VM and GEM objects + */ +static int +vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) +{ + struct drm_gem_object *obj; + int ret; + + /* Lock VM and objects: */ + drm_exec_until_all_locked(exec) { + ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm)); + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + + job_foreach_bo (obj, job) { + ret = drm_exec_prepare_obj(exec, obj, 1); + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + } + } + + return 0; +} + +/* + * Pin GEM objects, ensuring that we have backing pages. Pinning will move + * the object to the pinned LRU so that the shrinker knows to first consider + * other objects for evicting. + */ +static int +vm_bind_job_pin_objects(struct msm_vm_bind_job *job) +{ + struct drm_gem_object *obj; + + /* + * First loop, before holding the LRU lock, avoids holding the + * LRU lock while calling msm_gem_pin_vma_locked (which could + * trigger get_pages()) + */ + job_foreach_bo (obj, job) { + struct page **pages; + + pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED); + if (IS_ERR(pages)) + return PTR_ERR(pages); + } + + struct msm_drm_private *priv = job->vm->drm->dev_private; + + /* + * A second loop while holding the LRU lock (a) avoids acquiring/dropping + * the LRU lock for each individual bo, while (b) avoiding holding the + * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger + * get_pages() which could trigger reclaim.. and if we held the LRU lock + * could trigger deadlock with the shrinker). + */ + mutex_lock(&priv->lru.lock); + job_foreach_bo (obj, job) + msm_gem_pin_obj_locked(obj); + mutex_unlock(&priv->lru.lock); + + job->bos_pinned = true; + + return 0; +} + +/* + * Unpin GEM objects. Normally this is done after the bind job is run. + */ +static void +vm_bind_job_unpin_objects(struct msm_vm_bind_job *job) +{ + struct drm_gem_object *obj; + + if (!job->bos_pinned) + return; + + job_foreach_bo (obj, job) + msm_gem_unpin_locked(obj); + + job->bos_pinned = false; +} + +/* + * Pre-allocate pgtable memory, and translate the VM bind requests into a + * sequence of pgtable updates to be applied asynchronously. 
+	 */
+static int
+vm_bind_job_prepare(struct msm_vm_bind_job *job)
+{
+	struct msm_gem_vm *vm = to_msm_vm(job->vm);
+	struct msm_mmu *mmu = vm->mmu;
+	int ret;
+
+	mmu->funcs->prealloc_allocate(mmu, &job->prealloc);
+
+	for (unsigned i = 0; i < job->nr_ops; i++) {
+		const struct msm_vm_bind_op *op = &job->ops[i];
+		struct op_arg arg = {
+			.job = job,
+		};
+
+		switch (op->op) {
+		case MSM_VM_BIND_OP_UNMAP:
+			ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
+						 op->range);
+			break;
+		case MSM_VM_BIND_OP_MAP:
+			if (op->flags & MSM_VM_BIND_OP_DUMP)
+				arg.flags |= MSM_VMA_DUMP;
+			fallthrough;
+		case MSM_VM_BIND_OP_MAP_NULL:
+			ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
+					       op->range, op->obj, op->obj_offset);
+			break;
+		default:
+			/*
+			 * lookup_op() should have already thrown an error for
+			 * invalid ops
+			 */
+			BUG_ON("unreachable");
+		}
+
+		if (ret) {
+			/*
+			 * If we've already started modifying the vm, we can't
+			 * adequately describe to userspace the intermediate
+			 * state the vm is in. So throw up our hands!
+			 */
+			if (i > 0)
+				vm->unusable = true;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Attach fences to the GEM objects being bound. This will signify to
+ * the shrinker that they are busy even after dropping the locks (ie.
+ * drm_exec_fini())
+ */
+static void
+vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
+{
+	for (unsigned i = 0; i < job->nr_ops; i++) {
+		struct drm_gem_object *obj = job->ops[i].obj;
+
+		if (!obj)
+			continue;
+
+		dma_resv_add_fence(obj->resv, job->fence,
+				   DMA_RESV_USAGE_KERNEL);
+	}
+}
+
+int
+msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_vm_bind *args = data;
+	struct msm_context *ctx = file->driver_priv;
+	struct msm_vm_bind_job *job = NULL;
+	struct msm_gpu *gpu = priv->gpu;
+	struct msm_gpu_submitqueue *queue;
+	struct msm_syncobj_post_dep *post_deps = NULL;
+	struct drm_syncobj **syncobjs_to_reset = NULL;
+	struct dma_fence *fence;
+	int out_fence_fd = -1;
+	int ret, nr_bos = 0;
+	unsigned i;
+
+	if (!gpu)
+		return -ENXIO;
+
+	/*
+	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just
+	 * immediately close the device file and all will be torn down.
+	 */
+	if (to_msm_vm(ctx->vm)->unusable)
+		return UERR(EPIPE, dev, "context is unusable");
+
+	/*
+	 * Technically, you cannot create a VM_BIND submitqueue in the first
+	 * place if you haven't opted in to VM_BIND context.  But it is
+	 * cleaner / less confusing to check this case directly.
+ */ + if (!msm_context_is_vmbind(ctx)) + return UERR(EINVAL, dev, "context does not support vmbind"); + + if (args->flags & ~MSM_VM_BIND_FLAGS) + return UERR(EINVAL, dev, "invalid flags"); + + queue = msm_submitqueue_get(ctx, args->queue_id); + if (!queue) + return -ENOENT; + + if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) { + ret = UERR(EINVAL, dev, "Invalid queue type"); + goto out_post_unlock; + } + + if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) { + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (out_fence_fd < 0) { + ret = out_fence_fd; + goto out_post_unlock; + } + } + + job = vm_bind_job_create(dev, gpu, queue, args->nr_ops); + if (IS_ERR(job)) { + ret = PTR_ERR(job); + goto out_post_unlock; + } + + ret = mutex_lock_interruptible(&queue->lock); + if (ret) + goto out_post_unlock; + + if (args->flags & MSM_VM_BIND_FENCE_FD_IN) { + struct dma_fence *in_fence; + + in_fence = sync_file_get_fence(args->fence_fd); + + if (!in_fence) { + ret = UERR(EINVAL, dev, "invalid in-fence"); + goto out_unlock; + } + + ret = drm_sched_job_add_dependency(&job->base, in_fence); + if (ret) + goto out_unlock; + } + + if (args->in_syncobjs > 0) { + syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base, + file, args->in_syncobjs, + args->nr_in_syncobjs, + args->syncobj_stride); + if (IS_ERR(syncobjs_to_reset)) { + ret = PTR_ERR(syncobjs_to_reset); + goto out_unlock; + } + } + + if (args->out_syncobjs > 0) { + post_deps = msm_syncobj_parse_post_deps(dev, file, + args->out_syncobjs, + args->nr_out_syncobjs, + args->syncobj_stride); + if (IS_ERR(post_deps)) { + ret = PTR_ERR(post_deps); + goto out_unlock; + } + } + + ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos); + if (ret) + goto out_unlock; + + struct drm_exec exec; + unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT; + drm_exec_init(&exec, flags, nr_bos + 1); + + ret = vm_bind_job_lock_objects(job, &exec); + if (ret) + goto out; + + ret = vm_bind_job_pin_objects(job); + if (ret) + goto out; + + ret = vm_bind_job_prepare(job); + if (ret) + goto out; + + drm_sched_job_arm(&job->base); + + job->fence = dma_fence_get(&job->base.s_fence->finished); + + if (ret == 0 && args->flags & MSM_VM_BIND_FENCE_FD_OUT) { + struct sync_file *sync_file = sync_file_create(job->fence); + if (!sync_file) { + ret = -ENOMEM; + } else { + fd_install(out_fence_fd, sync_file->file); + args->fence_fd = out_fence_fd; + } + } + + if (ret) + goto out; + + vm_bind_job_attach_fences(job); + + /* + * The job can be free'd (and fence unref'd) at any point after + * drm_sched_entity_push_job(), so we need to hold our own ref + */ + fence = dma_fence_get(job->fence); + + drm_sched_entity_push_job(&job->base); + + msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs); + msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence); + + dma_fence_put(fence); + +out: + if (ret) + vm_bind_job_unpin_objects(job); + drm_exec_fini(&exec); +out_unlock: + mutex_unlock(&queue->lock); +out_post_unlock: + if (ret && (out_fence_fd >= 0)) + put_unused_fd(out_fence_fd); + + if (!IS_ERR_OR_NULL(job)) { + if (ret) + msm_vma_job_free(&job->base); + } else { + /* + * If the submit hasn't yet taken ownership of the queue + * then we need to drop the reference ourself: + */ + msm_submitqueue_put(queue); + } + + if (!IS_ERR_OR_NULL(post_deps)) { + for (i = 0; i < args->nr_out_syncobjs; ++i) { + kfree(post_deps[i].chain); + drm_syncobj_put(post_deps[i].syncobj); + } + kfree(post_deps); + } + + if (!IS_ERR_OR_NULL(syncobjs_to_reset)) { + for (i = 0; i < 
args->nr_in_syncobjs; ++i) { + if (syncobjs_to_reset[i]) + drm_syncobj_put(syncobjs_to_reset[i]); + } + kfree(syncobjs_to_reset); + } + + return ret; } diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 6d6cd1219926..5c67294edc95 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -272,7 +272,10 @@ struct drm_msm_gem_submit_cmd { __u32 size; /* in, cmdstream size */ __u32 pad; __u32 nr_relocs; /* in, number of submit_reloc's */ - __u64 relocs; /* in, ptr to array of submit_reloc's */ + union { + __u64 relocs; /* in, ptr to array of submit_reloc's */ + __u64 iova; /* cmdstream address (for VM_BIND contexts) */ + }; };
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the @@ -339,7 +342,74 @@ struct drm_msm_gem_submit { __u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */ __u32 syncobj_stride; /* in, stride of syncobj arrays. */ __u32 pad; /*in, reserved for future use, always 0. */ +}; + +#define MSM_VM_BIND_OP_UNMAP 0 +#define MSM_VM_BIND_OP_MAP 1 +#define MSM_VM_BIND_OP_MAP_NULL 2 + +#define MSM_VM_BIND_OP_DUMP 1 +#define MSM_VM_BIND_OP_FLAGS ( \ + MSM_VM_BIND_OP_DUMP | \ + 0)
+/**
+ * struct drm_msm_vm_bind_op - bind/unbind op to run
+ */
+struct drm_msm_vm_bind_op {
+	/** @op: one of MSM_VM_BIND_OP_x */
+	__u32 op;
+	/** @handle: GEM object handle, MBZ for UNMAP or MAP_NULL */
+	__u32 handle;
+	/** @obj_offset: Offset into GEM object, MBZ for UNMAP or MAP_NULL */
+	__u64 obj_offset;
+	/** @iova: Address to operate on */
+	__u64 iova;
+	/** @range: Number of bytes to map/unmap */
+	__u64 range;
+	/** @flags: Bitmask of MSM_VM_BIND_OP_FLAG_x */
+	__u32 flags;
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
+#define MSM_VM_BIND_FENCE_FD_IN   0x00000001
+#define MSM_VM_BIND_FENCE_FD_OUT  0x00000002
+#define MSM_VM_BIND_FLAGS ( \
+		MSM_VM_BIND_FENCE_FD_IN | \
+		MSM_VM_BIND_FENCE_FD_OUT | \
+		0)
+
+/**
+ * struct drm_msm_vm_bind - Input of &DRM_IOCTL_MSM_VM_BIND
+ */
+struct drm_msm_vm_bind {
+	/** @flags: in, bitmask of MSM_VM_BIND_x */
+	__u32 flags;
+	/** @nr_ops: the number of bind ops in this ioctl */
+	__u32 nr_ops;
+	/** @fence_fd: in/out fence fd (see MSM_VM_BIND_FENCE_FD_IN/OUT) */
+	__s32 fence_fd;
+	/** @queue_id: in, submitqueue id */
+	__u32 queue_id;
+	/** @in_syncobjs: in, ptr to array of drm_msm_gem_syncobj */
+	__u64 in_syncobjs;
+	/** @out_syncobjs: in, ptr to array of drm_msm_gem_syncobj */
+	__u64 out_syncobjs;
+	/** @nr_in_syncobjs: in, number of entries in in_syncobj */
+	__u32 nr_in_syncobjs;
+	/** @nr_out_syncobjs: in, number of entries in out_syncobj */
+	__u32 nr_out_syncobjs;
+	/** @syncobj_stride: in, stride of syncobj arrays */
+	__u32 syncobj_stride;
+	/** @op_stride: sizeof each struct drm_msm_vm_bind_op in @ops */
+	__u32 op_stride;
+	union {
+		/** @op: used if nr_ops == 1 */
+		struct drm_msm_vm_bind_op op;
+		/** @ops: userptr to array of drm_msm_vm_bind_op if nr_ops > 1 */
+		__u64 ops;
+	};
 };
#define MSM_WAIT_FENCE_BOOST 0x00000001 @@ -435,6 +505,7 @@ struct drm_msm_submitqueue_query { #define DRM_MSM_SUBMITQUEUE_NEW 0x0A #define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B #define DRM_MSM_SUBMITQUEUE_QUERY 0x0C +#define DRM_MSM_VM_BIND 0x0D
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) #define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param) @@ -448,6 +519,7 @@ struct drm_msm_submitqueue_query { #define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue) #define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32) #define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query) +#define DRM_IOCTL_MSM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_VM_BIND, struct drm_msm_vm_bind)
#if defined(__cplusplus) }
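For illustration only, a minimal sketch of how userspace might drive the new ioctl via libdrm's drmIoctl(). It assumes the DRM fd is already open, the context has opted in to VM_BIND, and queue_id names a submitqueue created with MSM_SUBMITQUEUE_VM_BIND; the helper name, GEM handle, iova, and size are placeholders, not part of this series:

/*
 * Hypothetical usage sketch (not from the patch): map one GEM object at a
 * fixed GPU address using the single inlined op.  For batches, nr_ops > 1
 * and the union's "ops" member instead points at a userspace array, with
 * op_stride = sizeof(struct drm_msm_vm_bind_op).
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>        /* drmIoctl() from libdrm */
#include "msm_drm.h"        /* the UAPI header patched above */

static int map_bo(int fd, uint32_t queue_id, uint32_t handle,
		  uint64_t iova, uint64_t size)
{
	struct drm_msm_vm_bind req;

	memset(&req, 0, sizeof(req));
	req.flags = 0;                      /* no in/out fence in this example */
	req.queue_id = queue_id;            /* MSM_SUBMITQUEUE_VM_BIND queue */
	req.nr_ops = 1;                     /* single op is inlined in req.op */
	req.op.op = MSM_VM_BIND_OP_MAP;
	req.op.handle = handle;             /* GEM handle backing the mapping */
	req.op.obj_offset = 0;
	req.op.iova = iova;                 /* page-aligned GPU VA */
	req.op.range = size;                /* page-aligned length in bytes */

	return drmIoctl(fd, DRM_IOCTL_MSM_VM_BIND, &req);
}

Unmapping would use MSM_VM_BIND_OP_UNMAP with handle zero; MSM_VM_BIND_OP_MAP_NULL likewise takes no handle and is intended for Vulkan sparse NULL bindings.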