drm/i915: Update a bunch of LRC functions to take requests
A bunch of the low level LRC functions were passing around ringbuf and ctx
pairs. In a few cases, they took the r/c pair and a request as well. This is all
quite messy and unnecessary. The context_queue() call is especially bad now that
the fake request code has been removed: it takes a request plus three extra
parameters that the caller must first extract from that same request, and then
it checks those values against what it finds in the request anyway. Removing all
the derivable data makes the code much simpler all round.

This patch updates those functions to just take the request structure.
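
The shape of the change is easy to see in miniature. Below is a toy,
self-contained C sketch of the before/after calling convention; the struct
and function names are hypothetical stand-ins, not the driver's real types:

	#include <stdint.h>

	/* Hypothetical stand-ins for the driver's structs. */
	struct engine { int id; };
	struct ctx { int id; };
	struct request {
		struct engine *ring;
		struct ctx *ctx;
		uint32_t tail;
	};

	/* Before: the caller passes values it pulled out of the request,
	 * and the callee must cross-check them against the request. */
	static int queue_old(struct engine *ring, struct ctx *to, uint32_t tail,
			     struct request *request)
	{
		return (ring == request->ring && to == request->ctx &&
			tail == request->tail) ? 0 : -1;
	}

	/* After: everything is derived from the request itself, so no
	 * mismatch is possible and the cross-checks disappear. */
	static int queue_new(struct request *request)
	{
		struct engine *ring = request->ring;	/* derived, not passed */
		struct ctx *to = request->ctx;
		(void)ring; (void)to;
		return 0;
	}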

Note that logical_ring_wait_for_space() now takes a request structure, but it
already had a local request pointer that it uses to scan the request list for
something to wait on. To avoid confusion, the local variable has been renamed
'target' (it is searching for a target request to wait on) and the parameter has
been named 'req' (so that any reference accidentally missed in the conversion
becomes a compiler error rather than silently binding to the wrong variable).
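
A contrived, hypothetical sketch (not the driver's code) of why the distinct
names matter: if the parameter reused the old local's name, a stale reference
to the removed scan variable would still compile, just with the wrong meaning.

	struct gem_request { int id; };

	static int do_wait(struct gem_request *r) { (void)r; return 0; }

	/* Parameter reuses the old local's name: a leftover reference to
	 * the removed scan variable "request" still compiles, but now
	 * waits on the caller's request, not the one found by the scan. */
	static int wait_bad(struct gem_request *request, int bytes)
	{
		(void)bytes;
		return do_wait(request);
	}

	/* Parameter renamed to "req": the same stale "request" reference
	 * would no longer name anything and becomes a compile error. */
	static int wait_good(struct gem_request *req, int bytes)
	{
		struct gem_request *target = req;	/* stand-in for the ring scan */
		(void)bytes;
		return do_wait(target);
	}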

v2: Updated commit message re wait_for_space (Tomas Elf review comment).

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
John Harrison authored and Daniel Vetter committed Jun 23, 2015
1 parent 9bb1af4 commit ae70797
Showing 1 changed file with 29 additions and 37 deletions.

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -544,23 +544,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
 }
 
-static int execlists_context_queue(struct intel_engine_cs *ring,
-				   struct intel_context *to,
-				   u32 tail,
-				   struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
+	struct intel_engine_cs *ring = request->ring;
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (to != ring->default_context)
-		intel_lr_context_pin(ring, to);
-
-	WARN_ON(!request);
-	WARN_ON(to != request->ctx);
+	if (request->ctx != ring->default_context)
+		intel_lr_context_pin(ring, request->ctx);
 
 	i915_gem_request_reference(request);
 
-	request->tail = tail;
+	request->tail = request->ringbuf->tail;
 
 	spin_lock_irq(&ring->execlist_lock);
 
@@ -575,7 +570,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
-		if (to == tail_req->ctx) {
+		if (request->ctx == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 			     "More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
@@ -659,12 +654,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	return 0;
 }
 
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-				       struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 				       int bytes)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
 
@@ -674,26 +669,26 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 	if (intel_ring_space(ringbuf) >= bytes)
 		return 0;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(target, &ring->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		if (request->ringbuf != ringbuf)
+		if (target->ringbuf != ringbuf)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(request->postfix, ringbuf->tail,
+		space = __intel_ring_space(target->postfix, ringbuf->tail,
 					   ringbuf->size);
 		if (space >= bytes)
 			break;
 	}
 
-	if (WARN_ON(&request->list == &ring->request_list))
+	if (WARN_ON(&target->list == &ring->request_list))
 		return -ENOSPC;
 
-	ret = i915_wait_request(request);
+	ret = i915_wait_request(target);
 	if (ret)
 		return ret;
 
@@ -703,39 +698,37 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
  * really happens during submission is that the context and current tail will be placed
  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
-				      struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = request->ring;
 
-	intel_logical_ring_advance(ringbuf);
+	intel_logical_ring_advance(request->ringbuf);
 
 	if (intel_ring_stopped(ring))
 		return;
 
-	execlists_context_queue(ring, ctx, ringbuf->tail, request);
+	execlists_context_queue(request);
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx)
+static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
 {
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
 	/* Can't wrap if space has already been reserved! */
 	WARN_ON(ringbuf->reserved_in_use);
 
 	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
+		int ret = logical_ring_wait_for_space(req, rem);
 
 		if (ret)
 			return ret;
@@ -752,9 +745,9 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
-				struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	int ret;
 
 	/*
@@ -766,7 +759,7 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
 	bytes += ringbuf->reserved_size;
 
 	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf, ctx);
+		ret = logical_ring_wrap_buffer(req);
 		if (unlikely(ret))
 			return ret;
 
@@ -779,7 +772,7 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
 	}
 
 	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+		ret = logical_ring_wait_for_space(req, bytes);
 		if (unlikely(ret))
 			return ret;
 	}
@@ -814,8 +807,7 @@ static int intel_logical_ring_begin(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	ret = logical_ring_prepare(req->ringbuf, req->ctx,
-				   num_dwords * sizeof(uint32_t));
+	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
@@ -1557,7 +1549,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+	intel_logical_ring_advance_and_submit(request);
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid