Skip to content

Commit

Permalink
drm/i915: Keep track of request among the scheduling lists
Browse files Browse the repository at this point in the history
If we keep track of when the i915_request.sched.link is on the HW
runlist, or in the priority queue we can simplify our interactions with
the request (such as during rescheduling). This also simplifies the next
patch where we introduce a new in-between list, for requests that are
ready but neither on the run list nor in the queue.

v2: Update i915_sched_node.link explanation for current usage where it
is a link on both the queue and on the runlists.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200116184754.2860848-1-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Jan 16, 2020
1 parent 9e83713 commit 672c368
Show file tree
Hide file tree
Showing 4 changed files with 38 additions and 18 deletions.
13 changes: 8 additions & 5 deletions drivers/gpu/drm/i915/gt/intel_lrc.c
Original file line number Diff line number Diff line change
Expand Up @@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));

list_move(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);

active = rq;
} else {
struct intel_engine_cs *owner = rq->context->engine;
Expand Down Expand Up @@ -2431,11 +2433,12 @@ static void execlists_preempt(struct timer_list *timer)
}

/*
 * Place a ready request onto the engine's priority queue and mark its
 * fence with I915_FENCE_FLAG_PQUEUE so membership can be tested later
 * (see i915_request_in_priority_queue()). Caller holds engine->active.lock.
 */
static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
struct i915_request *rq)
{
GEM_BUG_ON(!list_empty(&rq->sched.link));
list_add_tail(&rq->sched.link,
i915_sched_lookup_priolist(engine, rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static void __submit_queue_imm(struct intel_engine_cs *engine)
Expand Down Expand Up @@ -2471,7 +2474,7 @@ static void execlists_submit_request(struct i915_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);

queue_request(engine, &request->sched, rq_prio(request));
queue_request(engine, request);

GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
Expand Down
4 changes: 3 additions & 1 deletion drivers/gpu/drm/i915/i915_request.c
Original file line number Diff line number Diff line change
Expand Up @@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
xfer: /* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
}

if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
Expand Down
17 changes: 17 additions & 0 deletions drivers/gpu/drm/i915/i915_request.h
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,18 @@ enum {
*/
I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

/*
* I915_FENCE_FLAG_PQUEUE - this request is ready for execution
*
* Using the scheduler, when a request is ready for execution it is put
* into the priority queue, and removed from that queue when transferred
* to the HW runlists. We want to track its membership within the
* priority queue so that we can easily check before rescheduling.
*
* See i915_request_in_priority_queue()
*/
I915_FENCE_FLAG_PQUEUE,

/*
* I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
Expand Down Expand Up @@ -361,6 +373,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

/*
 * i915_request_in_priority_queue - test whether the request currently sits
 * on the engine's priority queue.
 *
 * True while I915_FENCE_FLAG_PQUEUE is set on the request's fence, i.e.
 * between queue_request() adding it to the priolist and
 * __i915_request_submit() moving it to the HW runlist (which clears the bit).
 */
static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

/**
* Returns true if seq1 is later than seq2.
*/
Expand Down
22 changes: 10 additions & 12 deletions drivers/gpu/drm/i915/i915_scheduler.c
Original file line number Diff line number Diff line change
Expand Up @@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,

node->attr.priority = prio;

if (list_empty(&node->link)) {
/*
* If the request is not in the priolist queue because
* it is not yet runnable, then it doesn't contribute
* to our preemption decisions. On the other hand,
* if the request is on the HW, it too is not in the
* queue; but in that case we may still need to reorder
* the inflight requests.
*/
/*
* Once the request is ready, it will be placed into the
* priority lists and then onto the HW runlist. Before the
* request is ready, it does not contribute to our preemption
* decisions and we can safely ignore it, as it will, and
* any preemption required, be dealt with upon submission.
* See engine->submit_request()
*/
if (list_empty(&node->link))
continue;
}

if (!intel_engine_is_virtual(engine) &&
!i915_request_is_active(node_to_request(node))) {
if (i915_request_in_priority_queue(node_to_request(node))) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
Expand Down

0 comments on commit 672c368

Please sign in to comment.