Skip to content

Commit

Permalink
drm/i915/execlists: Pull submit after dequeue under timeline lock
Browse files Browse the repository at this point in the history
In the next patch, we will begin processing the CSB from inside the
submission path (underneath an irqsoff section, and even from inside
interrupt handlers). This means that updating the execlists->port[] will
no longer be serialised by the tasklet but needs to be locked by the
engine->timeline.lock instead. Pull dequeue and submit under the same
lock for protection. (An alternate future plan is to keep the in/out
arrays separate for concurrent processing and reduced lock coverage.)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180628201211.13837-2-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Jun 28, 2018
1 parent 74093f3 commit 0b02bef
Showing 1 changed file with 12 additions and 20 deletions.
32 changes: 12 additions & 20 deletions drivers/gpu/drm/i915/intel_lrc.c
Original file line number Diff line number Diff line change
Expand Up @@ -567,7 +567,7 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
}

static bool __execlists_dequeue(struct intel_engine_cs *engine)
static void __execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
Expand Down Expand Up @@ -622,11 +622,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
* the HW to indicate that it has had a chance to respond.
*/
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
return false;
return;

if (need_preempt(engine, last, execlists->queue_priority)) {
inject_preempt_context(engine);
return false;
return;
}

/*
Expand All @@ -651,7 +651,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
priorities of the ports haven't been switched.
*/
if (port_count(&port[1]))
return false;
return;

/*
* WaIdleLiteRestore:bdw,skl
Expand Down Expand Up @@ -751,8 +751,10 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
port != execlists->port ? rq_prio(last) : INT_MIN;

execlists->first = rb;
if (submit)
if (submit) {
port_assign(port, last);
execlists_submit_ports(engine);
}

/* We must always keep the beast fed if we have work piled up */
GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
Expand All @@ -761,24 +763,19 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
if (last)
execlists_user_begin(execlists, execlists->port);

return submit;
/* If the engine is now idle, so should be the flag; and vice versa. */
GEM_BUG_ON(execlists_is_active(&engine->execlists,
EXECLISTS_ACTIVE_USER) ==
!port_isset(engine->execlists.port));
}

/*
 * Dequeue pending requests onto the execlists ports.
 *
 * All port[] manipulation (including the final submit) now happens inside
 * __execlists_dequeue() under engine->timeline.lock, since the CSB may be
 * processed from irq context and the tasklet no longer serialises access.
 * This wrapper only provides the irq-safe locking around the helper.
 */
static void execlists_dequeue(struct intel_engine_cs *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline.lock, flags);
	__execlists_dequeue(engine);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

void
Expand Down Expand Up @@ -1162,11 +1159,6 @@ static void execlists_submission_tasklet(unsigned long data)

if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
execlists_dequeue(engine);

/* If the engine is now idle, so should be the flag; and vice versa. */
GEM_BUG_ON(execlists_is_active(&engine->execlists,
EXECLISTS_ACTIVE_USER) ==
!port_isset(engine->execlists.port));
}

static void queue_request(struct intel_engine_cs *engine,
Expand Down

0 comments on commit 0b02bef

Please sign in to comment.