[ Sasha's backport helper bot ]
Hi,
✅ All tests passed successfully. Note: the warnings below (author mismatch, patch differing from the upstream commit) may still require the submitter's attention.
The upstream commit SHA1 provided is correct: 5bbccadaf33eea2b879d8326ad59ae0663be47d1
WARNING: Author mismatch between patch and upstream commit: Backport author: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> Commit author: Karol Wachowski <karol.wachowski@intel.com>
Note: The patch differs from the upstream commit: --- 1: 5bbccadaf33ee ! 1: ac0a38707ed72 accel/ivpu: Abort all jobs after command queue unregister @@ Metadata ## Commit message ## accel/ivpu: Abort all jobs after command queue unregister
+ commit 5bbccadaf33eea2b879d8326ad59ae0663be47d1 upstream. + With hardware scheduler it is not expected to receive JOB_DONE notifications from NPU FW for the jobs aborted due to command queue destroy JSM command. @@ Commit message especially when destruction of the last job of a specific context results in context release.
+ Cc: stable@vger.kernel.org # v6.14 Signed-off-by: Karol Wachowski <karol.wachowski@intel.com> Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com> Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> @@ drivers/accel/ivpu/ivpu_drv.h: struct ivpu_device {
## drivers/accel/ivpu/ivpu_job.c ## -@@ drivers/accel/ivpu/ivpu_job.c: static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p - - cmdq->priority = priority; - cmdq->is_legacy = is_legacy; -+ cmdq->is_valid = true; - - ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit, - &file_priv->cmdq_id_next, GFP_KERNEL); -@@ drivers/accel/ivpu/ivpu_job.c: static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p - goto err_free_cmdq; - } - -+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d\n", cmdq->id, file_priv->ctx.id); - return cmdq; - - err_free_cmdq: -@@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm +@@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id); if (!ret) @@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_cmdq_unregister(struct ivpu_file_ }
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id); -@@ drivers/accel/ivpu/ivpu_job.c: static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 - lockdep_assert_held(&file_priv->lock); - - cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id); -- if (!cmdq) { -- ivpu_err(vdev, "Failed to find command queue with ID: %u\n", cmdq_id); -+ if (!cmdq || !cmdq->is_valid) { -+ ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id); - return NULL; - } - -@@ drivers/accel/ivpu/ivpu_job.c: void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev) - mutex_unlock(&vdev->context_list_lock); - } - --static void ivpu_cmdq_unregister_all(struct ivpu_file_priv *file_priv) --{ -- struct ivpu_cmdq *cmdq; -- unsigned long cmdq_id; -- -- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) -- ivpu_cmdq_unregister(file_priv, cmdq); --} -- - void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv) - { - struct ivpu_device *vdev = file_priv->vdev; -+ struct ivpu_cmdq *cmdq; -+ unsigned long cmdq_id; - - lockdep_assert_held(&file_priv->lock); - -- ivpu_cmdq_unregister_all(file_priv); -+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) -+ ivpu_cmdq_unregister(file_priv, cmdq); +@@ drivers/accel/ivpu/ivpu_job.c: void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS) ivpu_jsm_context_release(vdev, file_priv->ctx.id); @@ drivers/accel/ivpu/ivpu_job.c: void ivpu_jobs_abort_all(struct ivpu_device *vdev xa_for_each(&vdev->submitted_jobs_xa, id, job) ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED); + -+ mutex_unlock(&vdev->submitted_jobs_lock); -+} -+ -+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id) -+{ -+ struct ivpu_job *job; -+ unsigned long id; -+ -+ mutex_lock(&vdev->submitted_jobs_lock); -+ -+ xa_for_each(&vdev->submitted_jobs_xa, id, job) -+ if (job->file_priv->ctx.id == ctx_id && job->cmdq_id == cmdq_id) -+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED); -+ + mutex_unlock(&vdev->submitted_jobs_lock); }
- static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) -@@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) + static int ivpu_job_submit(struct ivpu_job *job, u8 priority) +@@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_job_submit(struct ivpu_job *job, u8 priority) goto err_unlock_file_priv; }
- xa_lock(&vdev->submitted_jobs_xa); -+ job->cmdq_id = cmdq->id; -+ + mutex_lock(&vdev->submitted_jobs_lock); + is_first_job = xa_empty(&vdev->submitted_jobs_xa); @@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_job_submit(struct ivpu_job *job, }
ret = ivpu_cmdq_push_job(cmdq, job); -@@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) - job->job_id, file_priv->ctx.id, job->engine_idx, cmdq->priority, +@@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_job_submit(struct ivpu_job *job, u8 priority) + job->job_id, file_priv->ctx.id, job->engine_idx, priority, job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
- xa_unlock(&vdev->submitted_jobs_xa); @@ drivers/accel/ivpu/ivpu_job.c: static int ivpu_job_submit(struct ivpu_job *job, err_unlock_file_priv: mutex_unlock(&file_priv->lock); ivpu_rpm_put(vdev); -@@ drivers/accel/ivpu/ivpu_job.c: int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file * - int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file) - { - struct ivpu_file_priv *file_priv = file->driver_priv; -+ struct ivpu_device *vdev = file_priv->vdev; - struct drm_ivpu_cmdq_destroy *args = data; - struct ivpu_cmdq *cmdq; -+ u32 cmdq_id; - int ret = 0; - - mutex_lock(&file_priv->lock); - - cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id); -- if (!cmdq || cmdq->is_legacy) { -+ if (!cmdq || !cmdq->is_valid || cmdq->is_legacy) { - ret = -ENOENT; - goto unlock; - } - -+ /* -+ * There is no way to stop executing jobs per command queue -+ * in OS scheduling mode, mark command queue as invalid instead -+ * and it will be freed together with context release. -+ */ -+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS) { -+ cmdq->is_valid = false; -+ goto unlock; -+ } -+ -+ cmdq_id = cmdq->id; - ivpu_cmdq_destroy(file_priv, cmdq); -+ ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id); - unlock: - mutex_unlock(&file_priv->lock); - return ret; @@ drivers/accel/ivpu/ivpu_job.c: ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg) { @@ drivers/accel/ivpu/ivpu_job.c: void ivpu_job_done_consumer_fini(struct ivpu_devi +}
## drivers/accel/ivpu/ivpu_job.h ## -@@ drivers/accel/ivpu/ivpu_job.h: struct ivpu_cmdq { - u32 id; - u32 db_id; - u8 priority; -+ bool is_valid; - bool is_legacy; - }; - -@@ drivers/accel/ivpu/ivpu_job.h: struct ivpu_job { - struct ivpu_file_priv *file_priv; - struct dma_fence *done_fence; - u64 cmd_buf_vpu_addr; -+ u32 cmdq_id; - u32 job_id; - u32 engine_idx; - size_t bo_count; -@@ drivers/accel/ivpu/ivpu_job.h: void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv); - - void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv); - void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev); -+void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id); +@@ drivers/accel/ivpu/ivpu_job.h: void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev); void ivpu_job_done_consumer_fini(struct ivpu_device *vdev); ---
Results of testing on various branches:
| Branch | Patch Apply | Build Test | |---------------------------|-------------|------------| | stable/linux-5.4.y | Success | Success |