- Fix missing "alloc" log when drm_gem_handle_create() fails in
  drm_vma_node_allow() and the open callback is not called
- Add ivpu_bo->ctx_id so the actual context ID is logged instead of
  defaulting to 0
- Add a couple of WARNs and error messages so more memory corruption
  issues can be caught
Fixes: 37dee2a2f433 ("accel/ivpu: Improve buffer object debug logs")
Cc: stable@vger.kernel.org # v6.8+
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
---
 drivers/accel/ivpu/ivpu_gem.c | 25 +++++++++++++++++--------
 drivers/accel/ivpu/ivpu_gem.h |  1 +
 2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index e0d242d9f3e50..a76cbf4761f8c 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -28,7 +28,7 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
 {
 	ivpu_dbg(vdev, BO,
 		 "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
-		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
 		 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
 		 (bool)drm_gem_is_imported(&bo->base.base));
 }
@@ -94,8 +94,6 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 			ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
 	}
 
-	ivpu_dbg_bo(vdev, bo, "alloc");
-
 	mutex_unlock(&bo->lock);
 
 	drm_dev_exit(idx);
@@ -215,7 +213,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
 	return ERR_PTR(ret);
 }
 
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
 {
 	struct drm_gem_shmem_object *shmem;
 	struct ivpu_bo *bo;
@@ -233,6 +231,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
 		return ERR_CAST(shmem);
 
 	bo = to_ivpu_bo(&shmem->base);
+	bo->ctx_id = ctx_id;
 	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
 	bo->flags = flags;
 
@@ -240,6 +239,8 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
 	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
 	mutex_unlock(&vdev->bo_list_lock);
 
+	ivpu_dbg_bo(vdev, bo, "alloc");
+
 	return bo;
 }
 
@@ -278,8 +279,13 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
 	mutex_unlock(&vdev->bo_list_lock);
 
 	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+	drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
+	drm_WARN_ON(&vdev->drm, bo->base.vaddr);
 
 	ivpu_bo_unbind_locked(bo);
+	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
+	drm_WARN_ON(&vdev->drm, bo->ctx);
+
 	mutex_destroy(&bo->lock);
 
 	drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
@@ -314,7 +320,7 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	if (size == 0)
 		return -EINVAL;
 
-	bo = ivpu_bo_alloc(vdev, size, args->flags);
+	bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
 	if (IS_ERR(bo)) {
 		ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
 			 bo, file_priv->ctx.id, args->size, args->flags);
@@ -322,7 +328,10 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	}
 
 	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
-	if (!ret)
+	if (ret)
+		ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
+			 bo, file_priv->ctx.id, args->size, args->flags);
+	else
 		args->vpu_addr = bo->vpu_addr;
 
 	drm_gem_object_put(&bo->base.base);
@@ -345,7 +354,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
 	drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
 
-	bo = ivpu_bo_alloc(vdev, size, flags);
+	bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
 	if (IS_ERR(bo)) {
 		ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
 			 bo, range->start, size, flags);
@@ -452,7 +461,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 	mutex_lock(&bo->lock);
 
 	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
-		   bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
+		   bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
 		   bo->flags, kref_read(&bo->base.base.refcount));
 
 	if (bo->base.pages)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index a222a9ec9d611..0c93118c85bd3 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -21,6 +21,7 @@ struct ivpu_bo {
 	u64 vpu_addr;
 	u32 flags;
 	u32 job_status; /* Valid only for command buffer */
+	u32 ctx_id;
 	bool mmu_mapped;
 };
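
For reviewers who prefer to read the end state rather than the hunks, here is a condensed sketch of what ivpu_bo_alloc() and ivpu_gem_bo_free() would look like with this patch applied. It is reconstructed from the hunks above with unchanged code elided, so treat it as an illustration of the new flow rather than the exact post-patch source.

/* Sketch of the post-patch allocation path (condensed, unchanged code elided). */
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
{
	struct drm_gem_shmem_object *shmem;
	struct ivpu_bo *bo;

	/* ... flag validation and shmem object creation elided ... */

	bo = to_ivpu_bo(&shmem->base);
	bo->ctx_id = ctx_id;		/* new: remember the owning context for logging */
	bo->base.map_wc = flags & DRM_IVPU_BO_WC;
	bo->flags = flags;

	/* ... lock init and bo_list insertion elided ... */

	/* Moved here from ivpu_bo_alloc_vpu_addr() so the "alloc" log is emitted
	 * even when drm_gem_handle_create() fails and the open callback never runs.
	 */
	ivpu_dbg_bo(vdev, bo, "alloc");

	return bo;
}

/* Sketch of the post-patch free path: the new WARNs flag state that would
 * indicate memory corruption or a missed teardown step.
 */
static void ivpu_gem_bo_free(struct drm_gem_object *obj)
{
	struct ivpu_device *vdev = to_ivpu_device(obj->dev);
	struct ivpu_bo *bo = to_ivpu_bo(obj);

	/* ... bo_list removal and existing dma_resv check elided ... */

	drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);	/* new: size must be sane */
	drm_WARN_ON(&vdev->drm, bo->base.vaddr);	/* new: no leftover kernel mapping */

	ivpu_bo_unbind_locked(bo);
	drm_WARN_ON(&vdev->drm, bo->mmu_mapped);	/* new: unbind must have unmapped the BO */
	drm_WARN_ON(&vdev->drm, bo->ctx);		/* new: unbind must have detached the ctx */

	mutex_destroy(&bo->lock);

	/* ... remaining checks and shmem free elided ... */
}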
On 5/6/25 02:13, Jacek Lawrynowicz wrote:
> - Fix missing "alloc" log when drm_gem_handle_create() fails in
>   drm_vma_node_allow() and the open callback is not called
> - Add ivpu_bo->ctx_id so the actual context ID is logged instead of
>   defaulting to 0
> - Add a couple of WARNs and error messages so more memory corruption
>   issues can be caught

Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>