Merge tag 'drm-xe-fixes-2024-10-24-1' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

Driver Changes:
- Increase invalidation timeout to avoid errors in some hosts (Shuicheng)
- Flush worker on timeout (Badal)
- Better handling for force wake failure (Shuicheng)
- Improve argument check on user fence creation (Nirmoy)
- Don't restart parallel queues multiple times on GT reset (Nirmoy)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/trlkoiewtc4x2cyhsxmj3atayyq4zwto4iryea5pvya2ymc3yp@fdx5nhwmiyem
Dave Airlie committed Oct 25, 2024
2 parents e3e1cfe + cdc2102 commit 4d95a12
Showing 5 changed files with 42 additions and 7 deletions.
2 changes: 1 addition & 1 deletion drivers/gpu/drm/xe/xe_device.c
@@ -890,7 +890,7 @@ void xe_device_l2_flush(struct xe_device *xe)
 	spin_lock(&gt->global_invl_lock);
 	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
 
-	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
 		xe_gt_err_once(gt, "Global invalidation timeout\n");
 	spin_unlock(&gt->global_invl_lock);
 
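The fix above only widens the polling budget from 150 us to 500 us. As a rough illustration of what a bounded register poll like xe_mmio_wait32() does, here is a minimal userspace sketch; the helper name, the 10 us poll interval, the dummy register address, and the exact semantics are assumptions for illustration, not the driver's actual implementation:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Sketch of a bounded 32-bit register poll: re-read the register
 * until (value & mask) == expected or timeout_us expires.
 * read_reg stands in for the real MMIO accessor.
 */
int poll_reg32(uint32_t (*read_reg)(uint32_t addr), uint32_t addr,
	       uint32_t mask, uint32_t expected,
	       uint32_t timeout_us, uint32_t *out)
{
	uint32_t waited = 0;

	for (;;) {
		uint32_t val = read_reg(addr);

		if ((val & mask) == expected) {
			if (out)
				*out = val;
			return 0;	/* condition met within budget */
		}
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(10);		/* assumed 10 us poll interval */
		waited += 10;
	}
}

static uint32_t fake_reads;
static uint32_t fake_read(uint32_t addr)
{
	(void)addr;
	return ++fake_reads >= 5 ? 0x0 : 0x1;	/* bit clears on 5th read */
}

int main(void)
{
	uint32_t val;
	int ret = poll_reg32(fake_read, 0x0 /* dummy address */,
			     0x1, 0x0, 500, &val);

	printf("ret=%d val=%#x\n", ret, val);
	return 0;
}

With this shape, the change amounts to letting the loop spin for up to 500 us instead of 150 us before logging the "Global invalidation timeout" error.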
12 changes: 9 additions & 3 deletions drivers/gpu/drm/xe/xe_force_wake.c
@@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
 			    XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
 			    &value, true);
 	if (ret)
-		xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
-			     domain->id, str_wake_sleep(wake), ERR_PTR(ret),
-			     domain->reg_ack.addr, value);
+		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
+			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
+			  domain->reg_ack.addr, value);
+	if (value == ~0) {
+		xe_gt_err(gt,
+			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
+			  domain->id, str_wake_sleep(wake));
+		ret = -EIO;
+	}
 
 	return ret;
 }
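The value == ~0 test relies on a common PCI property: reads from a device that has dropped off the bus, or whose power well is down, complete as all ones. Since a valid ack register should never read as 0xFFFFFFFF here, that value is treated as "MMIO unreliable" and escalated to -EIO rather than reported as an ordinary ack timeout. A trivial sketch of the check, with an illustrative function name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: a 32-bit MMIO read of all ones usually means the read never
 * reached working hardware, so the value carries no information.
 */
static bool mmio_read_looks_dead(uint32_t value)
{
	return value == UINT32_MAX;	/* ~0 for a u32 register */
}

int main(void)
{
	printf("%d %d\n", mmio_read_looks_dead(0xFFFFFFFFu),
	       mmio_read_looks_dead(0x1u));
	return 0;
}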
18 changes: 18 additions & 0 deletions drivers/gpu/drm/xe/xe_guc_ct.c
@@ -897,6 +897,24 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 
 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
 
+	/*
+	 * Occasionally it is seen that the G2H worker starts running after a delay of more than
+	 * a second even after being queued and activated by the Linux workqueue subsystem. This
+	 * leads to a G2H timeout error. The root cause lies with the scheduling latency of the
+	 * Lunarlake Hybrid CPU: the issue disappears if the Lunarlake atom cores are disabled in
+	 * the BIOS, which is beyond the control of the xe KMD.
+	 *
+	 * TODO: Drop this change once the workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+	 */
+	if (!ret) {
+		flush_work(&ct->g2h_worker);
+		if (g2h_fence.done) {
+			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+				   g2h_fence.seqno, action[0]);
+			ret = 1;
+		}
+	}
+
 	/*
 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
 	 * the stack, since we have no clue if it will fire after the timeout before we can erase
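The workaround's shape is generic: wait on a completion flag with a timeout, and on timeout synchronously drain the worker (flush_work() waits for the queued work item to finish executing) and re-check the flag before declaring failure, so a late-scheduled worker is distinguished from a genuinely lost reply. A rough userspace analogue using a thread in place of the workqueue, with all names invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;	/* stands in for g2h_fence.done */

/* stands in for the G2H worker; deliberately completes late */
static void *late_worker(void *arg)
{
	(void)arg;
	sleep(2);
	pthread_mutex_lock(&lock);
	done = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct timespec deadline;
	bool timed_out;

	pthread_create(&tid, NULL, late_worker, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;	/* ~HZ: a one-second wait budget */

	pthread_mutex_lock(&lock);
	while (!done &&
	       pthread_cond_timedwait(&cond, &lock, &deadline) == 0)
		;	/* re-check the flag on spurious wakeups */
	timed_out = !done;
	pthread_mutex_unlock(&lock);

	/* "flush on timeout": run the worker to completion, re-check */
	pthread_join(tid, NULL);
	if (timed_out && done)
		printf("late completion, not a real timeout\n");
	else if (!timed_out)
		printf("completed within the timeout\n");
	return 0;
}

The kernel version sets ret = 1 in the late-completion case so callers treat it like an ordinary wait_event_timeout() success.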
14 changes: 12 additions & 2 deletions drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1726,8 +1726,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)
 
 	mutex_lock(&guc->submission_state.lock);
 
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to stop parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_stop(guc, q);
+	}
 
 	mutex_unlock(&guc->submission_state.lock);
 
@@ -1765,8 +1770,13 @@ int xe_guc_submit_start(struct xe_guc *guc)
 
 	mutex_lock(&guc->submission_state.lock);
 	atomic_dec(&guc->submission_state.stopped);
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to start parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_start(q);
+	}
 	mutex_unlock(&guc->submission_state.lock);
 
 	wake_up_all(&guc->ct.wq);
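The id-versus-index test works because, as the fix implies, a parallel queue of width N occupies N slots in the exec_queue_lookup xarray, one per GuC id it owns, all pointing at the same queue, so a plain iteration would visit that queue N times. Skipping every entry whose index is not the queue's primary id visits each queue exactly once. A small sketch of that dedup, with the registration scheme assumed for illustration:

#include <stdio.h>

struct queue {
	int id;		/* primary GuC id */
	int width;	/* slots owned by a parallel queue */
};

int main(void)
{
	struct queue parallel = { .id = 4, .width = 3 };
	struct queue *lookup[8] = { 0 };	/* stands in for the xarray */

	/* assumed registration: one entry per owned id */
	for (int i = 0; i < parallel.width; i++)
		lookup[parallel.id + i] = &parallel;

	for (int index = 0; index < 8; index++) {
		struct queue *q = lookup[index];

		if (!q)
			continue;
		if (q->id != index)	/* the fix: skip duplicate entries */
			continue;
		printf("stop/start queue %d exactly once\n", q->id);
	}
	return 0;
}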
3 changes: 2 additions & 1 deletion drivers/gpu/drm/xe/xe_sync.c
@@ -54,8 +54,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 {
 	struct xe_user_fence *ufence;
 	u64 __user *ptr = u64_to_user_ptr(addr);
+	u64 __maybe_unused prefetch_val;
 
-	if (!access_ok(ptr, sizeof(*ptr)))
+	if (get_user(prefetch_val, ptr))
 		return ERR_PTR(-EFAULT);
 
 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
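The switch from access_ok() to get_user() strengthens the argument check: access_ok() only verifies that the pointer range lies in the user portion of the address space, while get_user() performs an actual fault-handled load, so an in-range but unmapped or unreadable address now fails with -EFAULT at fence creation rather than later when the fence is signalled. The __maybe_unused annotation reflects that the loaded value is discarded; only the act of reading matters. A userspace caricature of the difference; both checker functions are mock stand-ins, not the kernel APIs:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define USER_SPACE_TOP 0x0000800000000000ULL	/* assumed split */

/* mock range check in the spirit of access_ok(): mapped or not,
 * any address below the user/kernel split passes */
static bool range_ok(const void *ptr, size_t size)
{
	uint64_t addr = (uint64_t)(uintptr_t)ptr;

	return addr + size <= USER_SPACE_TOP;
}

/* mock probing read in the spirit of get_user(); the "readable"
 * flag stands in for the fault handling we cannot model here */
static int probe_read(uint64_t *dst, const uint64_t *src, bool readable)
{
	if (!readable)
		return -14;	/* -EFAULT */
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

int main(void)
{
	uint64_t value, backing = 42;

	/* mapped and in range: both checks pass */
	printf("range_ok=%d probe=%d\n",
	       range_ok(&backing, sizeof(backing)),
	       probe_read(&value, &backing, true));

	/* in range but unmapped: only the probing read catches it */
	printf("range_ok=%d probe=%d\n",
	       range_ok((const void *)0x1000, sizeof(uint64_t)),
	       probe_read(&value, (const uint64_t *)0x1000, false));
	return 0;
}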
