drm/i915: Make sure execbuffer always passes ww state to i915_vma_pin.
As a preparation step for full object locking and wait/wound handling
during pin and object mapping, ensure that we always pass the ww context
in i915_gem_execbuffer.c to i915_vma_pin, and use lockdep to ensure that
this happens.

This also requires changing the order of eb_parse slightly, to ensure
we pass ww at a point where we could still handle -EDEADLK safely.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200819140904.1708856-15-maarten.lankhorst@linux.intel.com
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Maarten Lankhorst authored and Joonas Lahtinen committed Sep 7, 2020
1 parent 3999a70 commit 47b0869
Showing 26 changed files with 217 additions and 137 deletions.
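All of the hunks below feed one pattern: an i915 ww (wait/wound) context groups a set of pins into a single transaction that can back off and retry when a lock attempt fails with -EDEADLK. Here is a minimal caller-side sketch of that transaction, modelled on the igt_gpu_reloc selftest further down; the function name and the single-vma body are illustrative, not code from this commit:

static int my_pin_in_ww_transaction(struct i915_vma *vma)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* true = interruptible waits */
retry:
	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	/* ... pin further objects/contexts against the same &ww ... */

	if (err == -EDEADLK) {
		/* Wounded: drop all held locks, wait for the winner, redo the block. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);	/* releases the locks; pins persist until unpinned */
	return err;
}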
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/display/intel_display.c
@@ -3451,7 +3451,7 @@ initial_plane_vma(struct drm_i915_private *i915,
 	if (IS_ERR(vma))
 		goto err_obj;
 
-	if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
 		goto err_obj;
 
 	if (i915_gem_object_is_tiled(obj) &&
4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1154,7 +1154,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 
 	i915_gem_ww_ctx_init(&ww, true);
 retry:
-	err = intel_context_pin(ce);
+	err = intel_context_pin_ww(ce, &ww);
 	if (err)
 		goto err;
 
@@ -1247,7 +1247,7 @@ static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
 		/* ppGTT is not part of the legacy context image */
-		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
+		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
 
 	return 0;
 }
140 changes: 76 additions & 64 deletions drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -436,16 +436,17 @@ eb_pin_vma(struct i915_execbuffer *eb,
 		pin_flags |= PIN_GLOBAL;
 
 	/* Attempt to reuse the current location if available */
-	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
+	/* TODO: Add -EDEADLK handling here */
+	if (unlikely(i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags))) {
 		if (entry->flags & EXEC_OBJECT_PINNED)
 			return false;
 
 		/* Failing that pick any _free_ space if suitable */
-		if (unlikely(i915_vma_pin(vma,
-					  entry->pad_to_size,
-					  entry->alignment,
-					  eb_pin_flags(entry, ev->flags) |
-					  PIN_USER | PIN_NOEVICT)))
+		if (unlikely(i915_vma_pin_ww(vma, &eb->ww,
+					     entry->pad_to_size,
+					     entry->alignment,
+					     eb_pin_flags(entry, ev->flags) |
+					     PIN_USER | PIN_NOEVICT)))
 			return false;
 	}
 
@@ -586,7 +587,7 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
 		obj->cache_level != I915_CACHE_NONE);
 }
 
-static int eb_reserve_vma(const struct i915_execbuffer *eb,
+static int eb_reserve_vma(struct i915_execbuffer *eb,
 			  struct eb_vma *ev,
 			  u64 pin_flags)
 {
@@ -601,7 +602,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 			return err;
 	}
 
-	err = i915_vma_pin(vma,
+	err = i915_vma_pin_ww(vma, &eb->ww,
 			   entry->pad_to_size, entry->alignment,
 			   eb_pin_flags(entry, ev->flags) | pin_flags);
 	if (err)
@@ -1132,9 +1133,10 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 }
 
 static void *reloc_iomap(struct drm_i915_gem_object *obj,
-			 struct reloc_cache *cache,
+			 struct i915_execbuffer *eb,
 			 unsigned long page)
 {
+	struct reloc_cache *cache = &eb->reloc_cache;
 	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 	unsigned long offset;
 	void *vaddr;
@@ -1156,10 +1158,13 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		if (err)
 			return ERR_PTR(err);
 
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
+		vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
+						  PIN_MAPPABLE |
+						  PIN_NONBLOCK /* NOWARN */ |
+						  PIN_NOEVICT);
+		if (vma == ERR_PTR(-EDEADLK))
+			return vma;
+
 		if (IS_ERR(vma)) {
 			memset(&cache->node, 0, sizeof(cache->node));
 			mutex_lock(&ggtt->vm.mutex);
@@ -1195,17 +1200,18 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 }
 
 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
-			 struct reloc_cache *cache,
+			 struct i915_execbuffer *eb,
 			 unsigned long page)
 {
+	struct reloc_cache *cache = &eb->reloc_cache;
 	void *vaddr;
 
 	if (cache->page == page) {
 		vaddr = unmask_page(cache->vaddr);
 	} else {
 		vaddr = NULL;
 		if ((cache->vaddr & KMAP) == 0)
-			vaddr = reloc_iomap(obj, cache, page);
+			vaddr = reloc_iomap(obj, eb, page);
 		if (!vaddr)
 			vaddr = reloc_kmap(obj, cache, page);
 	}
@@ -1292,7 +1298,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_unmap;
 	}
 
-	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
+	err = i915_vma_pin_ww(batch, &eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK);
 	if (err)
 		goto err_unmap;
 
@@ -1313,7 +1319,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		eb->reloc_context = ce;
 	}
 
-	err = intel_context_pin(ce);
+	err = intel_context_pin_ww(ce, &eb->ww);
 	if (err)
 		goto err_unpin;
 
@@ -1536,8 +1542,7 @@ relocate_entry(struct i915_vma *vma,
 	void *vaddr;
 
 repeat:
-	vaddr = reloc_vaddr(vma->obj,
-			    &eb->reloc_cache,
+	vaddr = reloc_vaddr(vma->obj, eb,
 			    offset >> PAGE_SHIFT);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
@@ -1953,6 +1958,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
 	rq = eb_pin_engine(eb, false);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
+		rq = NULL;
 		goto err;
 	}
 
@@ -2236,7 +2242,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 }
 
 static struct i915_vma *
-shadow_batch_pin(struct drm_i915_gem_object *obj,
+shadow_batch_pin(struct i915_execbuffer *eb,
+		 struct drm_i915_gem_object *obj,
 		 struct i915_address_space *vm,
 		 unsigned int flags)
 {
@@ -2247,7 +2254,7 @@ shadow_batch_pin(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma))
 		return vma;
 
-	err = i915_vma_pin(vma, 0, 0, flags);
+	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
 	if (err)
 		return ERR_PTR(err);
 
@@ -2397,16 +2404,33 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	return err;
 }
 
+static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
+{
+	/*
+	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+	 * batch" bit. Hence we need to pin secure batches into the global gtt.
+	 * hsw should have this fixed, but bdw mucks it up again. */
+	if (eb->batch_flags & I915_DISPATCH_SECURE)
+		return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);
+
+	return NULL;
+}
+
 static int eb_parse(struct i915_execbuffer *eb)
 {
 	struct drm_i915_private *i915 = eb->i915;
 	struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
-	struct i915_vma *shadow, *trampoline;
+	struct i915_vma *shadow, *trampoline, *batch;
 	unsigned int len;
 	int err;
 
-	if (!eb_use_cmdparser(eb))
-		return 0;
+	if (!eb_use_cmdparser(eb)) {
+		batch = eb_dispatch_secure(eb, eb->batch->vma);
+		if (IS_ERR(batch))
+			return PTR_ERR(batch);
+
+		goto secure_batch;
+	}
 
 	len = eb->batch_len;
 	if (!CMDPARSER_USES_GGTT(eb->i915)) {
@@ -2434,7 +2458,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 	if (err)
 		goto err;
 
-	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
+	shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
 	if (IS_ERR(shadow)) {
 		err = PTR_ERR(shadow);
 		goto err;
@@ -2446,7 +2470,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 	if (CMDPARSER_USES_GGTT(eb->i915)) {
 		trampoline = shadow;
 
-		shadow = shadow_batch_pin(pool->obj,
+		shadow = shadow_batch_pin(eb, pool->obj,
 					  &eb->engine->gt->ggtt->vm,
 					  PIN_GLOBAL);
 		if (IS_ERR(shadow)) {
@@ -2459,19 +2483,34 @@ static int eb_parse(struct i915_execbuffer *eb)
 		eb->batch_flags |= I915_DISPATCH_SECURE;
 	}
 
+	batch = eb_dispatch_secure(eb, shadow);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto err_trampoline;
+	}
+
 	err = eb_parse_pipeline(eb, shadow, trampoline);
 	if (err)
-		goto err_trampoline;
+		goto err_unpin_batch;
 
-	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
-	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
 	eb->batch = &eb->vma[eb->buffer_count++];
+	eb->batch->vma = i915_vma_get(shadow);
+	eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
 
 	eb->trampoline = trampoline;
 	eb->batch_start_offset = 0;
 
+secure_batch:
+	if (batch) {
+		eb->batch = &eb->vma[eb->buffer_count++];
+		eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
+		eb->batch->vma = i915_vma_get(batch);
+	}
 	return 0;
 
+err_unpin_batch:
+	if (batch)
+		i915_vma_unpin(batch);
 err_trampoline:
 	if (trampoline)
 		i915_vma_unpin(trampoline);
@@ -2613,7 +2652,7 @@ static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	err = intel_context_pin(ce);
+	err = intel_context_pin_ww(ce, &eb->ww);
 	if (err)
 		return ERR_PTR(err);
 
Expand Down Expand Up @@ -3231,33 +3270,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,

ww_acquire_done(&eb.ww.ctx);

/*
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (eb.batch_flags & I915_DISPATCH_SECURE) {
struct i915_vma *vma;

/*
* So on first glance it looks freaky that we pin the batch here
* outside of the reservation loop. But:
* - The batch is already pinned into the relevant ppgtt, so we
* already have the backing storage fully allocated.
* - No other BO uses the global gtt (well contexts, but meh),
* so we don't really have issues with multiple objects not
* fitting due to fragmentation.
* So this is actually safe.
*/
vma = i915_gem_object_ggtt_pin(eb.batch->vma->obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_vma;
}

batch = vma;
} else {
batch = eb.batch->vma;
}
batch = eb.batch->vma;

/* All GPU relocation batches must be submitted prior to the user rq */
GEM_BUG_ON(eb.reloc_cache.rq);
@@ -3266,7 +3279,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	eb.request = i915_request_create(eb.context);
 	if (IS_ERR(eb.request)) {
 		err = PTR_ERR(eb.request);
-		goto err_batch_unpin;
+		goto err_vma;
 	}
 
 	if (in_fence) {
@@ -3327,9 +3340,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	}
 	i915_request_put(eb.request);
 
-err_batch_unpin:
-	if (eb.batch_flags & I915_DISPATCH_SECURE)
-		i915_vma_unpin(batch);
 err_vma:
 	eb_release_vmas(&eb, true);
 	if (eb.trampoline)
@@ -3417,7 +3427,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	/* Copy in the exec list from userland */
 	exec_list = kvmalloc_array(count, sizeof(*exec_list),
 				   __GFP_NOWARN | GFP_KERNEL);
-	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+
+	/* Allocate extra slots for use by the command parser */
+	exec2_list = kvmalloc_array(count + 2, eb_element_size(),
 				   __GFP_NOWARN | GFP_KERNEL);
 	if (exec_list == NULL || exec2_list == NULL) {
 		drm_dbg(&i915->drm,
@@ -3494,8 +3506,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		return err;
 
-	/* Allocate an extra slot for use by the command parser */
-	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+	/* Allocate extra slots for use by the command parser */
+	exec2_list = kvmalloc_array(count + 2, eb_element_size(),
 				   __GFP_NOWARN | GFP_KERNEL);
 	if (exec2_list == NULL) {
 		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
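The eb_parse() surgery above is the reordering the commit message mentions: the secure-batch GGTT pin used to happen in i915_gem_do_execbuffer() after ww_acquire_done(), where a -EDEADLK could no longer be unwound, and routing it through eb_dispatch_secure() inside eb_parse() keeps it in the acquire phase. The same discipline shows up in reloc_iomap(): a helper that pins under &eb->ww must hand -EDEADLK straight back to the retry loop before trying any fallback, or the transaction can never back off. A hedged sketch of that rule (the function name is illustrative, and the drm_mm node fallback of the real function is elided):

static struct i915_vma *my_pin_or_fallback(struct drm_i915_gem_object *obj,
					   struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
					  PIN_MAPPABLE | PIN_NONBLOCK | PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK))
		return vma;	/* must reach the ww retry loop untouched */

	if (IS_ERR(vma)) {
		/* Only non-deadlock errors may take the fallback path
		 * (in reloc_iomap() that is the drm_mm node insertion). */
		return NULL;
	}

	return vma;
}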
4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -36,7 +36,7 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
 	if (err)
 		return err;
 
-	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
 	if (err)
 		return err;
 
@@ -139,7 +139,7 @@ static int igt_gpu_reloc(void *arg)
 
 	i915_gem_ww_ctx_init(&eb.ww, false);
 retry:
-	err = intel_context_pin(eb.context);
+	err = intel_context_pin_ww(eb.context, &eb.ww);
 	if (!err) {
 		err = __igt_gpu_reloc(&eb, scratch);
 
4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -368,7 +368,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	return vma;
 }
 
-int gen6_ppgtt_pin(struct i915_ppgtt *base)
+int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
 {
 	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
 	int err;
@@ -394,7 +394,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base)
 	 */
 	err = 0;
 	if (!atomic_read(&ppgtt->pin_count))
-		err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
+		err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);
 	if (!err)
 		atomic_inc(&ppgtt->pin_count);
 	mutex_unlock(&ppgtt->pin_mutex);
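The pin_count handling in the second hunk keeps gen6_ppgtt_pin() idempotent: only the first caller actually pins the page-directory vma into the GGTT (and so only that caller consumes the ww context), while every later caller just takes another reference. A condensed sketch of that shape; the name is illustrative, and the real function also has a lockless fast path and more error handling:

static int my_gen6_pin_once(struct gen6_ppgtt *ppgtt, struct i915_gem_ww_ctx *ww)
{
	int err = 0;

	if (mutex_lock_interruptible(&ppgtt->pin_mutex))
		return -EINTR;

	/* Only the first pin touches the GGTT... */
	if (!atomic_read(&ppgtt->pin_count))
		err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);

	/* ...every successful caller takes a reference. */
	if (!err)
		atomic_inc(&ppgtt->pin_count);

	mutex_unlock(&ppgtt->pin_mutex);
	return err;
}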
(21 more changed files not shown)
