drm/i915/selftests: Flush all active callbacks
Flushing the outer i915_active is not enough, as we need the barrier to
be applied across all the active dma_fence callbacks. So we must
serialise with each outstanding fence.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112096
References: f79520b ("drm/i915/selftests: Synchronize checking active status with retirement")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191101181022.25633-1-chris@chris-wilson.co.uk
Chris Wilson committed Nov 2, 2019
1 parent 9278bbb commit 3881376
Showing 3 changed files with 35 additions and 3 deletions.
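
The fix hinges on a simple locking idiom: a callback that always runs with a particular lock held can be flushed by taking and releasing that same lock. dma_fence callbacks are invoked under the fence's spinlock (f->lock), so bouncing that lock serialises with any callback still in flight; the matching lock/unlock of ref->mutex plus flush_work() then covers the retire path. Below is a minimal standalone sketch of the idiom in userspace C (plain pthreads, not i915 code; every name in it is made up for illustration).

/*
 * Standalone illustration (not kernel code) of the lock/unlock-as-barrier
 * idiom the patch relies on: once we know a callback holds cb_lock,
 * locking and unlocking cb_lock ourselves guarantees it has completed.
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int started;
static int done;

/* Stand-in for a dma_fence callback: it always runs with cb_lock held. */
static void *callback(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&cb_lock);
	atomic_store(&started, 1);
	usleep(10000);		/* pretend the callback does some work */
	done = 1;
	pthread_mutex_unlock(&cb_lock);
	return NULL;
}

/* Stand-in for spin_unlock_wait(): serialise with the current lock holder. */
static void unlock_wait(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	pthread_mutex_unlock(lock);
}

int main(void)
{
	pthread_t thread;

	pthread_create(&thread, NULL, callback, NULL);
	while (!atomic_load(&started))	/* wait until the callback holds the lock */
		;

	unlock_wait(&cb_lock);
	assert(done == 1);	/* the callback finished before unlock_wait() returned */
	printf("callback flushed, done = %d\n", done);

	pthread_join(thread, NULL);
	return 0;
}
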
4 changes: 1 addition & 3 deletions drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -53,9 +53,7 @@ static struct pulse *pulse_create(void)
 
 static void pulse_unlock_wait(struct pulse *p)
 {
-	mutex_lock(&p->active.mutex);
-	mutex_unlock(&p->active.mutex);
-	flush_work(&p->active.work);
+	i915_active_unlock_wait(&p->active);
 }
 
 static int __live_idle_pulse(struct intel_engine_cs *engine,
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/i915_active.h
@@ -215,5 +215,6 @@ void i915_active_acquire_barrier(struct i915_active *ref);
 void i915_request_add_active_barriers(struct i915_request *rq);
 
 void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+void i915_active_unlock_wait(struct i915_active *ref);
 
 #endif /* _I915_ACTIVE_H_ */
33 changes: 33 additions & 0 deletions drivers/gpu/drm/i915/selftests/i915_active.c
@@ -250,3 +250,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
 		i915_active_release(ref);
 	}
 }
+
+static void spin_unlock_wait(spinlock_t *lock)
+{
+	spin_lock_irq(lock);
+	spin_unlock_irq(lock);
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+	if (i915_active_acquire_if_busy(ref)) {
+		struct active_node *it, *n;
+
+		rcu_read_lock();
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+			struct dma_fence *f;
+
+			/* Wait for all active callbacks */
+			f = rcu_dereference(it->base.fence);
+			if (f)
+				spin_unlock_wait(f->lock);
+		}
+		rcu_read_unlock();
+
+		i915_active_release(ref);
+	}
+
+	/* And wait for the retire callback */
+	mutex_lock(&ref->mutex);
+	mutex_unlock(&ref->mutex);
+
+	/* ... which may have been on a thread instead */
+	flush_work(&ref->work);
+}
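
A hypothetical caller would follow the same shape as pulse_unlock_wait() above: quiesce first, then inspect state. A short sketch, assuming the existing i915_active_is_idle() helper; check_idle() itself is invented for illustration.

/* Hypothetical selftest helper: quiesce the tracker, then check it is idle. */
static int check_idle(struct i915_active *ref)
{
	/*
	 * After this returns, every outstanding dma_fence callback has run to
	 * completion and the deferred retire worker has been flushed.
	 */
	i915_active_unlock_wait(ref);

	return i915_active_is_idle(ref) ? 0 : -EINVAL;
}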
