accel/ivpu: Abort all jobs after command queue unregister
commit 5bbccad upstream.

With the hardware scheduler, the NPU FW is not expected to send
JOB_DONE notifications for jobs aborted by the command queue destroy
JSM command.

Remove jobs submitted to an unregistered command queue from
submitted_jobs_xa to avoid triggering a TDR in that case.

Add an explicit submitted_jobs_lock that protects access to the list
of submitted jobs, which is now used to find the jobs to abort.

Move the context abort procedure to a separate work queue so that it
does not slow down handling of IPCs or DCT requests when a job abort
takes longer, especially when destroying the last job of a context
results in releasing that context.

Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250107173238.381120-4-maciej.falkowski@linux.intel.com
[ This backport drops the parts of the upstream commit related to the
  command queue UAPI, as that UAPI is not present in the 6.14 kernel
  and should not be backported. ]
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Karol Wachowski authored and Greg Kroah-Hartman committed May 9, 2025
1 parent 01db0e1 commit 437b1eb
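
The deferral described in the commit message uses the standard Linux
workqueue idiom: embed a work_struct in the device structure, bind it to
a handler once at init time, and queue it from the event path so that the
slow abort runs in process context, where it may sleep and take mutexes.
A minimal sketch of that idiom (my_device, abort_work_fn, and
my_event_handler are illustrative names; the INIT_WORK/queue_work/
container_of calls match what the diff below does with context_abort_work):

    #include <linux/container_of.h>
    #include <linux/workqueue.h>

    struct my_device {
            struct work_struct abort_work;
            /* ... */
    };

    static void abort_work_fn(struct work_struct *work)
    {
            /* Recover the containing device from the work item. */
            struct my_device *mdev = container_of(work, struct my_device, abort_work);

            /* Process context: sleeping and taking mutexes are allowed here. */
            (void)mdev;
    }

    static void my_device_init(struct my_device *mdev)
    {
            INIT_WORK(&mdev->abort_work, abort_work_fn);
    }

    static void my_event_handler(struct my_device *mdev)
    {
            /* Fast path: just schedule the work on the shared system workqueue. */
            queue_work(system_wq, &mdev->abort_work);
    }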
Showing 6 changed files with 79 additions and 49 deletions.
32 changes: 6 additions & 26 deletions drivers/accel/ivpu/ivpu_drv.c
@@ -36,8 +36,6 @@
 #define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
 #endif
 
-static struct lock_class_key submitted_jobs_xa_lock_class_key;
-
 int ivpu_dbg_mask;
 module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
 MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
@@ -465,26 +463,6 @@ static const struct drm_driver driver = {
         .major = 1,
 };
 
-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
-{
-        struct ivpu_file_priv *file_priv;
-        unsigned long ctx_id;
-
-        mutex_lock(&vdev->context_list_lock);
-
-        xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
-                if (!file_priv->has_mmu_faults || file_priv->aborted)
-                        continue;
-
-                mutex_lock(&file_priv->lock);
-                ivpu_context_abort_locked(file_priv);
-                file_priv->aborted = true;
-                mutex_unlock(&file_priv->lock);
-        }
-
-        mutex_unlock(&vdev->context_list_lock);
-}
-
 static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
 {
         struct ivpu_device *vdev = arg;
@@ -498,9 +476,6 @@ static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
         case IVPU_HW_IRQ_SRC_IPC:
                 ivpu_ipc_irq_thread_handler(vdev);
                 break;
-        case IVPU_HW_IRQ_SRC_MMU_EVTQ:
-                ivpu_context_abort_invalid(vdev);
-                break;
         case IVPU_HW_IRQ_SRC_DCT:
                 ivpu_pm_dct_irq_thread_handler(vdev);
                 break;
@@ -617,16 +592,21 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
         xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
         xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
         xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
-        lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
         INIT_LIST_HEAD(&vdev->bo_list);
 
         vdev->db_limit.min = IVPU_MIN_DB;
         vdev->db_limit.max = IVPU_MAX_DB;
 
+        INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_thread_handler);
+
         ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
         if (ret)
                 goto err_xa_destroy;
 
+        ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
+        if (ret)
+                goto err_xa_destroy;
+
         ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
         if (ret)
                 goto err_xa_destroy;
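
A side note on the init path above: drmm_mutex_init() is the DRM-managed
variant of mutex_init(), so the mutex is torn down automatically when the
drm_device is released and no explicit mutex_destroy() is needed on the
error or teardown paths. A hedged sketch of the call shape (example_init
is an illustrative name; drmm_mutex_init is the real DRM API used above):

    #include <drm/drm_device.h>
    #include <drm/drm_managed.h>
    #include <linux/mutex.h>

    /* Returns 0 or a negative errno; cleanup is registered on the drm_device. */
    static int example_init(struct drm_device *drm, struct mutex *lock)
    {
            return drmm_mutex_init(drm, lock);
    }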
2 changes: 2 additions & 0 deletions drivers/accel/ivpu/ivpu_drv.h
@@ -137,6 +137,7 @@ struct ivpu_device {
         struct mutex context_list_lock; /* Protects user context addition/removal */
         struct xarray context_xa;
         struct xa_limit context_xa_limit;
+        struct work_struct context_abort_work;
 
         struct xarray db_xa;
         struct xa_limit db_limit;
@@ -145,6 +146,7 @@ struct ivpu_device {
         struct mutex bo_list_lock; /* Protects bo_list */
         struct list_head bo_list;
 
+        struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
         struct xarray submitted_jobs_xa;
         struct ivpu_ipc_consumer job_done_consumer;
 
85 changes: 66 additions & 19 deletions drivers/accel/ivpu/ivpu_job.c
@@ -223,7 +223,8 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
         if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
                 ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
                 if (!ret)
-                        ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
+                        ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
+                                 cmdq->id, file_priv->ctx.id);
         }
 
         ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@@ -324,6 +325,8 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
 
         if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
                 ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+        file_priv->aborted = true;
 }
 
 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -462,23 +465,23 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
 {
         struct ivpu_job *job;
 
-        xa_lock(&vdev->submitted_jobs_xa);
-        job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+        lockdep_assert_held(&vdev->submitted_jobs_lock);
 
+        job = xa_erase(&vdev->submitted_jobs_xa, job_id);
         if (xa_empty(&vdev->submitted_jobs_xa) && job) {
                 vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
                                             vdev->busy_time);
         }
 
-        xa_unlock(&vdev->submitted_jobs_xa);
-
         return job;
 }
 
 static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 {
         struct ivpu_job *job;
 
+        lockdep_assert_held(&vdev->submitted_jobs_lock);
+
         job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
         if (!job)
                 return -ENOENT;
@@ -497,6 +500,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
         ivpu_stop_job_timeout_detection(vdev);
 
         ivpu_rpm_put(vdev);
+
+        if (!xa_empty(&vdev->submitted_jobs_xa))
+                ivpu_start_job_timeout_detection(vdev);
+
         return 0;
 }
 
@@ -505,8 +512,12 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
         struct ivpu_job *job;
         unsigned long id;
 
+        mutex_lock(&vdev->submitted_jobs_lock);
+
         xa_for_each(&vdev->submitted_jobs_xa, id, job)
                 ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+        mutex_unlock(&vdev->submitted_jobs_lock);
 }
 
 static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
@@ -531,15 +542,16 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
                 goto err_unlock_file_priv;
         }
 
-        xa_lock(&vdev->submitted_jobs_xa);
+        mutex_lock(&vdev->submitted_jobs_lock);
+
         is_first_job = xa_empty(&vdev->submitted_jobs_xa);
-        ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
-                                &file_priv->job_id_next, GFP_KERNEL);
+        ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+                              &file_priv->job_id_next, GFP_KERNEL);
         if (ret < 0) {
                 ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
                          file_priv->ctx.id);
                 ret = -EBUSY;
-                goto err_unlock_submitted_jobs_xa;
+                goto err_unlock_submitted_jobs;
         }
 
         ret = ivpu_cmdq_push_job(cmdq, job);
@@ -562,19 +574,21 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
                  job->job_id, file_priv->ctx.id, job->engine_idx, priority,
                  job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
 
-        xa_unlock(&vdev->submitted_jobs_xa);
-
+        mutex_unlock(&vdev->submitted_jobs_lock);
         mutex_unlock(&file_priv->lock);
 
-        if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+        if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
+                mutex_lock(&vdev->submitted_jobs_lock);
                 ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+                mutex_unlock(&vdev->submitted_jobs_lock);
+        }
 
         return 0;
 
 err_erase_xa:
-        __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
-        xa_unlock(&vdev->submitted_jobs_xa);
+        xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock_submitted_jobs:
+        mutex_unlock(&vdev->submitted_jobs_lock);
 err_unlock_file_priv:
         mutex_unlock(&file_priv->lock);
         ivpu_rpm_put(vdev);
@@ -745,7 +759,6 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
                        struct vpu_jsm_msg *jsm_msg)
 {
         struct vpu_ipc_msg_payload_job_done *payload;
-        int ret;
 
         if (!jsm_msg) {
                 ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -758,9 +771,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
         }
 
         payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-        ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
-        if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
-                ivpu_start_job_timeout_detection(vdev);
+
+        mutex_lock(&vdev->submitted_jobs_lock);
+        ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+        mutex_unlock(&vdev->submitted_jobs_lock);
 }
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -773,3 +787,36 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
 {
         ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
 }
+
+void ivpu_context_abort_thread_handler(struct work_struct *work)
+{
+        struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+        struct ivpu_file_priv *file_priv;
+        unsigned long ctx_id;
+        struct ivpu_job *job;
+        unsigned long id;
+
+        mutex_lock(&vdev->context_list_lock);
+        xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+                if (!file_priv->has_mmu_faults || file_priv->aborted)
+                        continue;
+
+                mutex_lock(&file_priv->lock);
+                ivpu_context_abort_locked(file_priv);
+                mutex_unlock(&file_priv->lock);
+        }
+        mutex_unlock(&vdev->context_list_lock);
+
+        if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+                return;
+        /*
+         * In hardware scheduling mode NPU already has stopped processing jobs
+         * and won't send us any further notifications, thus we have to free job related resources
+         * and notify userspace
+         */
+        mutex_lock(&vdev->submitted_jobs_lock);
+        xa_for_each(&vdev->submitted_jobs_xa, id, job)
+                if (job->file_priv->aborted)
+                        ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+        mutex_unlock(&vdev->submitted_jobs_lock);
+}
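
One detail worth calling out in the hunks above: ivpu_job_remove_from_submitted_jobs()
and ivpu_job_signal_and_destroy() no longer take any lock themselves; every
caller (the job-done IPC handler, ivpu_jobs_abort_all(), the NULL_HW
test-mode path, and the new abort worker) holds submitted_jobs_lock, and
the callees enforce that contract with lockdep_assert_held(), which checks
at runtime when lockdep is enabled. A minimal sketch of this
caller-locks/callee-asserts idiom, with generic names rather than the
driver's own:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(registry_lock);

    /* Callee: documents and (with CONFIG_PROVE_LOCKING) verifies its precondition. */
    static void registry_remove(unsigned long id)
    {
            lockdep_assert_held(&registry_lock);
            /* ... erase id from the shared structure ... */
    }

    /* Caller: owns the critical section, so it can batch multiple removals. */
    static void registry_remove_all(void)
    {
            mutex_lock(&registry_lock);
            registry_remove(0);
            registry_remove(1);
            mutex_unlock(&registry_lock);
    }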
1 change: 1 addition & 0 deletions drivers/accel/ivpu/ivpu_job.h
@@ -66,6 +66,7 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
 void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+void ivpu_context_abort_thread_handler(struct work_struct *work);
 
 void ivpu_jobs_abort_all(struct ivpu_device *vdev);
 
3 changes: 1 addition & 2 deletions drivers/accel/ivpu/ivpu_mmu.c
@@ -890,8 +890,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
                 REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
         }
 
-        if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
-                ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+        queue_work(system_wq, &vdev->context_abort_work);
 }
 
 void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
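
Replacing the IRQ-FIFO signalling with queue_work() also gets event
coalescing for free: queue_work() returns false and does nothing when the
work item is already pending, so a burst of MMU events before the worker
runs collapses into a single abort pass instead of overflowing a FIFO.
A small illustrative sketch (on_mmu_event is a hypothetical handler;
queue_work and system_wq are the real APIs used above):

    #include <linux/printk.h>
    #include <linux/workqueue.h>

    static void on_mmu_event(struct work_struct *abort_work)
    {
            /* false return: already queued, the pending pass covers this event too */
            if (!queue_work(system_wq, abort_work))
                    pr_debug("abort work already pending, event coalesced\n");
    }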
5 changes: 3 additions & 2 deletions drivers/accel/ivpu/ivpu_sysfs.c
@@ -30,11 +30,12 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *buf)
         struct ivpu_device *vdev = to_ivpu_device(drm);
         ktime_t total, now = 0;
 
-        xa_lock(&vdev->submitted_jobs_xa);
+        mutex_lock(&vdev->submitted_jobs_lock);
+
         total = vdev->busy_time;
         if (!xa_empty(&vdev->submitted_jobs_xa))
                 now = ktime_sub(ktime_get(), vdev->busy_start_ts);
-        xa_unlock(&vdev->submitted_jobs_xa);
+        mutex_unlock(&vdev->submitted_jobs_lock);
 
         return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
 }
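
The sysfs hunk reads a simple interval accumulator: busy_time holds the
time accumulated over completed busy periods, and if jobs are still in
flight the current period (now minus busy_start_ts) is added on top. A
sketch of that accounting, distilled from the diff (the busy_stats struct
and helpers are illustrative; the field names follow the driver, and all
accesses are assumed to happen under submitted_jobs_lock):

    #include <linux/ktime.h>
    #include <linux/types.h>

    struct busy_stats {
            ktime_t busy_time;     /* accumulated across completed busy periods */
            ktime_t busy_start_ts; /* stamped when the first job is submitted */
            bool jobs_pending;
    };

    /* Called when the job set goes from empty to non-empty. */
    static void on_first_job(struct busy_stats *s)
    {
            s->busy_start_ts = ktime_get();
            s->jobs_pending = true;
    }

    /* Called when the last in-flight job completes. */
    static void on_last_job_done(struct busy_stats *s)
    {
            s->busy_time = ktime_add(ktime_sub(ktime_get(), s->busy_start_ts), s->busy_time);
            s->jobs_pending = false;
    }

    /* What the sysfs attribute reports, in microseconds. */
    static s64 busy_time_us(struct busy_stats *s)
    {
            ktime_t now = s->jobs_pending ? ktime_sub(ktime_get(), s->busy_start_ts) : 0;

            return ktime_to_us(ktime_add(s->busy_time, now));
    }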
