drm/i915: Update i915_scheduler to operate on i915_sched_engine
Rather than passing around an intel_engine_cs in the scheduling code, pass
around an i915_sched_engine.

v3:
 (Jason Ekstrand)
  Add READ_ONCE around rq->engine in lock_sched_engine

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210618010638.98941-8-matthew.brost@intel.com
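
The v3 note above concerns the re-lock loop in lock_sched_engine() (see the
i915_scheduler.c hunk below): rq->engine may be repointed concurrently
(virtual engines can move a request between engines), while we only hold the
lock of whichever scheduler we locked last, so the pointer is re-read with
READ_ONCE() on every pass until the lock we hold matches the scheduler the
request currently belongs to. A condensed sketch of that loop, for
orientation only (names match the diff; this is not additional patch
content):

	while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
		spin_unlock(&locked->lock);       /* held lock is for the wrong scheduler */
		memset(cache, 0, sizeof(*cache)); /* cached priolist belonged to the old one */
		spin_lock(&sched_engine->lock);   /* lock the scheduler rq appears to use */
		locked = sched_engine;            /* loop re-checks rq->engine under it */
	}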
Matthew Brost authored and Matt Roper committed Jun 18, 2021
1 parent 71ed601 commit d2a31d0
Showing 4 changed files with 32 additions and 29 deletions.
11 changes: 7 additions & 4 deletions drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -382,7 +382,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
 		if (rq_prio(rq) != prio) {
 			prio = rq_prio(rq);
-			pl = i915_sched_lookup_priolist(engine, prio);
+			pl = i915_sched_lookup_priolist(engine->sched_engine,
+							prio);
 		}
 		GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));

@@ -1096,7 +1097,8 @@ static void defer_active(struct intel_engine_cs *engine)
 	if (!rq)
 		return;
 
-	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+	defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
+						      rq_prio(rq)));
 }
 
 static bool
@@ -2083,7 +2085,7 @@ static void __execlists_unhold(struct i915_request *rq)
 
 		i915_request_clear_hold(rq);
 		list_move_tail(&rq->sched.link,
-			       i915_sched_lookup_priolist(rq->engine,
+			       i915_sched_lookup_priolist(rq->engine->sched_engine,
							  rq_prio(rq)));
 		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
@@ -2452,7 +2454,8 @@ static void queue_request(struct intel_engine_cs *engine,
 {
 	GEM_BUG_ON(!list_empty(&rq->sched.link));
 	list_add_tail(&rq->sched.link,
-		      i915_sched_lookup_priolist(engine, rq_prio(rq)));
+		      i915_sched_lookup_priolist(engine->sched_engine,
+						 rq_prio(rq)));
 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -503,7 +503,7 @@ static inline void queue_request(struct intel_engine_cs *engine,
 {
 	GEM_BUG_ON(!list_empty(&rq->sched.link));
 	list_add_tail(&rq->sched.link,
-		      i915_sched_lookup_priolist(engine, prio));
+		      i915_sched_lookup_priolist(engine->sched_engine, prio));
 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
46 changes: 23 additions & 23 deletions drivers/gpu/drm/i915/i915_scheduler.c
@@ -61,14 +61,13 @@ static void assert_priolists(struct i915_sched_engine * const sched_engine)
 }
 
 struct list_head *
-i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)
 {
-	struct i915_sched_engine * const sched_engine = engine->sched_engine;
 	struct i915_priolist *p;
 	struct rb_node **parent, *rb;
 	bool first = true;
 
-	lockdep_assert_held(&engine->sched_engine->lock);
+	lockdep_assert_held(&sched_engine->lock);
 	assert_priolists(sched_engine);
 
 	if (unlikely(sched_engine->no_priolist))
@@ -130,13 +129,13 @@ struct sched_cache {
 	struct list_head *priolist;
 };
 
-static struct intel_engine_cs *
-sched_lock_engine(const struct i915_sched_node *node,
-		  struct intel_engine_cs *locked,
+static struct i915_sched_engine *
+lock_sched_engine(struct i915_sched_node *node,
+		  struct i915_sched_engine *locked,
		  struct sched_cache *cache)
 {
 	const struct i915_request *rq = node_to_request(node);
-	struct intel_engine_cs *engine;
+	struct i915_sched_engine *sched_engine;
 
 	GEM_BUG_ON(!locked);
 
@@ -146,22 +145,22 @@ sched_lock_engine(const struct i915_sched_node *node,
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
-	while (locked != (engine = READ_ONCE(rq->engine))) {
-		spin_unlock(&locked->sched_engine->lock);
+	while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
+		spin_unlock(&locked->lock);
 		memset(cache, 0, sizeof(*cache));
-		spin_lock(&engine->sched_engine->lock);
-		locked = engine;
+		spin_lock(&sched_engine->lock);
+		locked = sched_engine;
 	}
 
-	GEM_BUG_ON(locked != engine);
+	GEM_BUG_ON(locked != sched_engine);
 	return locked;
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
 {
 	const int prio = max(attr->priority, node->attr.priority);
-	struct intel_engine_cs *engine;
+	struct i915_sched_engine *sched_engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
 	struct sched_cache cache;
@@ -236,23 +235,24 @@ static void __i915_schedule(struct i915_sched_node *node,
 	}
 
 	memset(&cache, 0, sizeof(cache));
-	engine = node_to_request(node)->engine;
-	spin_lock(&engine->sched_engine->lock);
+	sched_engine = node_to_request(node)->engine->sched_engine;
+	spin_lock(&sched_engine->lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
-	engine = sched_lock_engine(node, engine, &cache);
+	sched_engine = lock_sched_engine(node, sched_engine, &cache);
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
 		INIT_LIST_HEAD(&dep->dfs_link);
 
 		node = dep->signaler;
-		engine = sched_lock_engine(node, engine, &cache);
-		lockdep_assert_held(&engine->sched_engine->lock);
+		sched_engine = lock_sched_engine(node, sched_engine, &cache);
+		lockdep_assert_held(&sched_engine->lock);
 
 		/* Recheck after acquiring the engine->timeline.lock */
 		if (prio <= node->attr.priority || node_signaled(node))
			continue;
 
-		GEM_BUG_ON(node_to_request(node)->engine != engine);
+		GEM_BUG_ON(node_to_request(node)->engine->sched_engine !=
+			   sched_engine);
 
 		WRITE_ONCE(node->attr.priority, prio);
 
@@ -270,17 +270,17 @@ static void __i915_schedule(struct i915_sched_node *node,
 		if (i915_request_in_priority_queue(node_to_request(node))) {
 			if (!cache.priolist)
				cache.priolist =
-					i915_sched_lookup_priolist(engine,
+					i915_sched_lookup_priolist(sched_engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
 		}
 
 		/* Defer (tasklet) submission until after all of our updates. */
-		if (engine->sched_engine->kick_backend)
-			engine->sched_engine->kick_backend(node_to_request(node), prio);
+		if (sched_engine->kick_backend)
+			sched_engine->kick_backend(node_to_request(node), prio);
 	}
 
-	spin_unlock(&engine->sched_engine->lock);
+	spin_unlock(&sched_engine->lock);
 }
 
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_scheduler.h
@@ -39,7 +39,7 @@ void i915_schedule(struct i915_request *request,
		   const struct i915_sched_attr *attr);
 
 struct list_head *
-i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio);
 
 void __i915_priolist_free(struct i915_priolist *p);
 static inline void i915_priolist_free(struct i915_priolist *p)
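
With the prototype change above, call sites look up the priority list through
the scheduling backend rather than the engine, and the backend's lock must be
held across the lookup (the lockdep assertion in i915_scheduler.c enforces
this). A minimal illustrative caller, closely modelled on the queue_request()
call sites touched by this patch; the helper name queue_rq_locked is
hypothetical and not part of the change:

	/* Hypothetical helper mirroring the queue_request() call sites in this patch. */
	static void queue_rq_locked(struct i915_sched_engine *sched_engine,
				    struct i915_request *rq, int prio)
	{
		lockdep_assert_held(&sched_engine->lock); /* required by the lookup */
		list_add_tail(&rq->sched.link,
			      i915_sched_lookup_priolist(sched_engine, prio));
		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
	}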
