drm/i915: Explicitly pin the logical context for execbuf
In order to separate the reservation phase of building a request from
its emission phase, we need to pull some of the request alloc activities
from deep inside i915_request to the surface, GEM_EXECBUFFER.

v2: Be frivolous, use a local drm_i915_private.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190425050143.811-1-chris@chris-wilson.co.uk
Chris Wilson committed Apr 25, 2019
1 parent 79ffac8 commit 8f2a105
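
For orientation, the reworked flow looks roughly like the sketch below. This is a simplified, hypothetical helper written only to illustrate the split between the reservation and emission phases; the real code is eb_pin_context(), eb_unpin_context(), eb_select_engine() and i915_gem_do_execbuffer() in the diff, with full locking, wakeref handling and error paths. Pinning the context may itself generate requests (to acquire GGTT space), which is why it now happens up front, before the request for the batch is created.

/*
 * Hypothetical sketch only: combines eb_pin_context() and the request
 * construction into one helper to show the two phases back to back.
 */
static int eb_pin_and_dispatch(struct i915_execbuffer *eb,
			       struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	/* Reservation phase: pin the logical context before building anything. */
	ce = intel_context_pin(eb->gem_context, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	eb->engine = engine;
	eb->context = ce;

	/* ... reserve vmas, perform relocations, validate the batch ... */

	/* Emission phase: the request is created against the pinned context. */
	rq = i915_request_create(eb->context);
	if (IS_ERR(rq)) {
		intel_context_unpin(ce);
		return PTR_ERR(rq);
	}

	/* ... emit the batch, attach fences ... */
	i915_request_add(rq);

	intel_context_unpin(ce);
	return 0;
}
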
Showing 2 changed files with 69 additions and 48 deletions.
108 changes: 69 additions & 39 deletions drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,6 +34,8 @@
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
@@ -236,7 +238,8 @@ struct i915_execbuffer {
unsigned int *flags;

struct intel_engine_cs *engine; /** engine to queue the request to */
- struct i915_gem_context *ctx; /** context for building the request */
+ struct intel_context *context; /* logical state for the request */
+ struct i915_gem_context *gem_context; /** caller's context */
struct i915_address_space *vm; /** GTT and vma for the request */

struct i915_request *request; /** our request to build */
@@ -738,7 +741,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
if (unlikely(!ctx))
return -ENOENT;

- eb->ctx = ctx;
+ eb->gem_context = ctx;
if (ctx->ppgtt) {
eb->vm = &ctx->ppgtt->vm;
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -784,7 +787,6 @@ static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)

static int eb_wait_for_ring(const struct i915_execbuffer *eb)
{
- const struct intel_context *ce;
struct i915_request *rq;
int ret = 0;

@@ -794,11 +796,7 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
* keeping all of their resources pinned.
*/

- ce = intel_context_lookup(eb->ctx, eb->engine);
- if (!ce || !ce->ring) /* first use, assume empty! */
- return 0;
-
- rq = __eb_wait_for_ring(ce->ring);
+ rq = __eb_wait_for_ring(eb->context->ring);
if (rq) {
mutex_unlock(&eb->i915->drm.struct_mutex);

@@ -817,15 +815,15 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
- struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
+ struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
struct drm_i915_gem_object *obj;
unsigned int i, batch;
int err;

- if (unlikely(i915_gem_context_is_closed(eb->ctx)))
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
return -ENOENT;

- if (unlikely(i915_gem_context_is_banned(eb->ctx)))
+ if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
return -EIO;

INIT_LIST_HEAD(&eb->relocs);
@@ -870,8 +868,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
if (!vma->open_count++)
i915_vma_reopen(vma);
list_add(&lut->obj_link, &obj->lut_list);
- list_add(&lut->ctx_link, &eb->ctx->handles_list);
- lut->ctx = eb->ctx;
+ list_add(&lut->ctx_link, &eb->gem_context->handles_list);
+ lut->ctx = eb->gem_context;
lut->handle = handle;

add_vma:
@@ -1227,7 +1225,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;

- rq = i915_request_alloc(eb->engine, eb->ctx);
+ rq = i915_request_create(eb->context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -2088,52 +2086,86 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_VEBOX] = VECS0
};

- static struct intel_engine_cs *
- eb_select_engine(struct drm_i915_private *dev_priv,
+ static int eb_pin_context(struct i915_execbuffer *eb,
+ struct intel_engine_cs *engine)
+ {
+ struct intel_context *ce;
+ int err;
+
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged.
+ */
+ err = i915_terminally_wedged(eb->i915);
+ if (err)
+ return err;
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ ce = intel_context_pin(eb->gem_context, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ eb->engine = engine;
+ eb->context = ce;
+ return 0;
+ }
+
+ static void eb_unpin_context(struct i915_execbuffer *eb)
+ {
+ intel_context_unpin(eb->context);
+ }
+
+ static int
+ eb_select_engine(struct i915_execbuffer *eb,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args)
{
+ struct drm_i915_private *i915 = eb->i915;
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
struct intel_engine_cs *engine;

if (user_ring_id > I915_USER_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
- return NULL;
+ return -EINVAL;
}

if ((user_ring_id != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
- return NULL;
+ return -EINVAL;
}

- if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
+ if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
- bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
+ bsd_idx = gen8_dispatch_bsd_engine(i915, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx >>= I915_EXEC_BSD_SHIFT;
bsd_idx--;
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
- return NULL;
+ return -EINVAL;
}

- engine = dev_priv->engine[_VCS(bsd_idx)];
+ engine = i915->engine[_VCS(bsd_idx)];
} else {
- engine = dev_priv->engine[user_ring_map[user_ring_id]];
+ engine = i915->engine[user_ring_map[user_ring_id]];
}

if (!engine) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
- return NULL;
+ return -EINVAL;
}

- return engine;
+ return eb_pin_context(eb, engine);
}

static void
@@ -2275,7 +2307,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
- intel_wakeref_t wakeref;
int out_fence_fd = -1;
int err;

@@ -2335,29 +2366,27 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;

- eb.engine = eb_select_engine(eb.i915, file, args);
- if (!eb.engine) {
- err = -EINVAL;
- goto err_engine;
- }
-
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
* process. Upon first dispatch, we acquire another prolonged
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
- wakeref = intel_runtime_pm_get(eb.i915);
+ intel_gt_pm_get(eb.i915);

err = i915_mutex_lock_interruptible(dev);
if (err)
goto err_rpm;

- err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+ err = eb_select_engine(&eb, file, args);
if (unlikely(err))
goto err_unlock;

+ err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+ if (unlikely(err))
+ goto err_engine;
+
err = eb_relocate(&eb);
if (err) {
/*
@@ -2441,7 +2470,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
GEM_BUG_ON(eb.reloc_cache.rq);

/* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_request_alloc(eb.engine, eb.ctx);
+ eb.request = i915_request_create(eb.context);
if (IS_ERR(eb.request)) {
err = PTR_ERR(eb.request);
goto err_batch_unpin;
@@ -2479,8 +2508,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
err_request:
- i915_request_add(eb.request);
add_to_client(eb.request, file);
+ i915_request_add(eb.request);

if (fences)
signal_fence_array(&eb, fences);
@@ -2502,12 +2531,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
+ err_engine:
+ eb_unpin_context(&eb);
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(eb.i915, wakeref);
- err_engine:
- i915_gem_context_put(eb.ctx);
+ intel_gt_pm_put(eb.i915);
+ i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
err_out_fence:
9 changes: 0 additions & 9 deletions drivers/gpu/drm/i915/i915_request.c
@@ -785,7 +785,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
struct drm_i915_private *i915 = engine->i915;
struct intel_context *ce;
struct i915_request *rq;
- int ret;

/*
* Preempt contexts are reserved for exclusive use to inject a
@@ -794,14 +793,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
GEM_BUG_ON(ctx == i915->preempt_context);

- /*
- * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged.
- */
- ret = i915_terminally_wedged(i915);
- if (ret)
- return ERR_PTR(ret);
-
/*
* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
