Skip to content

Commit

Permalink
drm/i915/guc: Use a local cancel_port_requests
Browse files Browse the repository at this point in the history
Since execlists and the guc have diverged in their port tracking, we
cannot simply reuse the execlists cancellation code as it leads to
unbalanced reference counting. Use a local, simpler routine for the guc.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190812203626.3948-1-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Aug 13, 2019
1 parent 478ffad commit 5f15c1e
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 16 deletions.
3 changes: 0 additions & 3 deletions drivers/gpu/drm/i915/gt/intel_engine.h
Original file line number Diff line number Diff line change
Expand Up @@ -136,9 +136,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
return READ_ONCE(*execlists->active);
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

Expand Down
6 changes: 3 additions & 3 deletions drivers/gpu/drm/i915/gt/intel_lrc.c
Original file line number Diff line number Diff line change
Expand Up @@ -1297,8 +1297,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
struct i915_request * const *port, *rq;

Expand Down Expand Up @@ -2355,7 +2355,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)

unwind:
/* Push back any incomplete requests for replay after the reset. */
execlists_cancel_port_requests(execlists);
cancel_port_requests(execlists);
__unwind_incomplete_requests(engine);
}

Expand Down
37 changes: 27 additions & 10 deletions drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
Original file line number Diff line number Diff line change
Expand Up @@ -517,22 +517,21 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
trace_i915_request_in(rq, idx);

if (!rq->hw_context->inflight)
rq->hw_context->inflight = rq->engine;
intel_context_inflight_inc(rq->hw_context);
intel_gt_pm_get(rq->engine->gt);
/*
* Currently we are not tracking the rq->context being inflight
* (ce->inflight = rq->engine). It is only used by the execlists
* backend at the moment, a similar counting strategy would be
* required if we generalise the inflight tracking.
*/

intel_gt_pm_get(rq->engine->gt);
return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
trace_i915_request_out(rq);

intel_context_inflight_dec(rq->hw_context);
if (!intel_context_inflight_count(rq->hw_context))
rq->hw_context->inflight = NULL;

intel_gt_pm_put(rq->engine->gt);
i915_request_put(rq);
}
Expand All @@ -556,6 +555,11 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
last = NULL;
}

/*
* We write directly into the execlists->inflight queue and don't use
* the execlists->pending queue, as we don't have a distinct switch
* event.
*/
port = first;
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
Expand Down Expand Up @@ -636,6 +640,19 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
}

/*
 * Drop every request currently tracked in the ELSP inflight array and
 * reset the tracker to empty.
 *
 * Walks execlists->active (NULL-terminated) and calls schedule_out() on
 * each request — releasing the reference and GT wakeref taken by
 * schedule_in() — then zeroes the inflight array and points ->active
 * back at its start (memset returns its first argument).
 *
 * NOTE(review): unlike the execlists backend's version, this only
 * touches the inflight queue; the GuC backend never populates the
 * pending queue, so there is nothing else to cancel here.
 * Caller is presumed to hold engine->active.lock — confirm at call
 * sites (guc_reset / guc_cancel_requests take it in this patch).
 */
static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
struct i915_request * const *port, *rq;

/* Note we are only using the inflight and not the pending queue */

for (port = execlists->active; (rq = *port); port++)
schedule_out(rq);
execlists->active =
memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
Expand All @@ -644,7 +661,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)

spin_lock_irqsave(&engine->active.lock, flags);

execlists_cancel_port_requests(execlists);
cancel_port_requests(execlists);

/* Push back any incomplete requests for replay after the reset. */
rq = execlists_unwind_incomplete_requests(execlists);
Expand Down Expand Up @@ -687,7 +704,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->active.lock, flags);

/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
cancel_port_requests(execlists);

/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
Expand Down

0 comments on commit 5f15c1e

Please sign in to comment.