drm/i915: Add unit tests for the breadcrumb rbtree, wakeups
Third retroactive test: make sure that the seqno waiters are woken.

v2: Smattering of comments, rearrange code
v3: Fix IDLE assert to avoid startup/sleep races

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170213171558.20942-5-chris@chris-wilson.co.uk
Chris Wilson committed Feb 13, 2017
1 parent ae1f809 commit e62e8ad
Showing 1 changed file with 201 additions and 0 deletions: drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
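The heart of the new test is a three-counter handshake (ready/set/done) that parks all the waiter threads between passes and releases them in lockstep. As a rough userspace analogue, here is a hypothetical sketch of the same barrier using pthreads (a mutex/condvar pair standing in for the kernel's waitqueue, wait_on_atomic_t() and wake_up_atomic_t()):

/*
 * Hypothetical userspace analogue of the ready/set/done handshake used by
 * the test below; a mutex/condvar pair stands in for the kernel waitqueue
 * and atomic counters. Build with -pthread.
 */
#include <pthread.h>

#define NTHREADS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready = NTHREADS;	/* cleared by the parent to release waiters */
static int set;			/* ack: waiter has left the barrier */
static int done = NTHREADS;	/* ack: waiter has finished a pass */

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	if (--done == 0)	/* report completion of the previous pass */
		pthread_cond_broadcast(&cond);
	while (ready)		/* park until the parent releases us */
		pthread_cond_wait(&cond, &lock);
	if (--set == 0)		/* acknowledge the release */
		pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);

	/* ... the real test would now wait upon its seqno ... */
	return arg;
}

int main(void)
{
	pthread_t tsk[NTHREADS];
	int n;

	for (n = 0; n < NTHREADS; n++)
		pthread_create(&tsk[n], NULL, waiter, NULL);

	pthread_mutex_lock(&lock);
	while (done)		/* wait for every thread to park */
		pthread_cond_wait(&cond, &lock);
	set = NTHREADS;
	ready = 0;		/* release them all at once ... */
	pthread_cond_broadcast(&cond);
	while (set)		/* ... and wait for each acknowledgement */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	for (n = 0; n < NTHREADS; n++)
		pthread_join(tsk[n], NULL);
	return 0;
}

In the kernel code below, igt_wake_all_sync() plays the parent's release-and-acknowledge role, and wait_for_ready() is the per-thread park.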
@@ -259,11 +259,212 @@ static int igt_insert_complete(void *arg)
	return err;
}

struct igt_wakeup {
	struct task_struct *tsk;
	atomic_t *ready, *set, *done;
	struct intel_engine_cs *engine;
	unsigned long flags;
#define STOP 0
#define IDLE 1
	wait_queue_head_t *wq;
	u32 seqno;
};

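/* wait_on_atomic_t() callback: sleep until the counter's owner wakes us */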
static int wait_atomic(atomic_t *p)
{
	schedule();
	return 0;
}

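/* As above, but give up with -ETIMEDOUT after 10s rather than hang the test */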
static int wait_atomic_timeout(atomic_t *p)
{
	return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
}

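/*
 * Park the thread between test passes: flag ourselves as idle and report
 * the end of the previous pass via @done, then sleep on @wq until the
 * parent clears @ready. On release (or STOP), drop the IDLE flag and
 * acknowledge through @set. Returns false once asked to stop.
 */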
static bool wait_for_ready(struct igt_wakeup *w)
{
	DEFINE_WAIT(ready);

	set_bit(IDLE, &w->flags);
	if (atomic_dec_and_test(w->done))
		wake_up_atomic_t(w->done);

	if (test_bit(STOP, &w->flags))
		goto out;

	for (;;) {
		prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
		if (atomic_read(w->ready) == 0)
			break;

		schedule();
	}
	finish_wait(w->wq, &ready);

out:
	clear_bit(IDLE, &w->flags);
	if (atomic_dec_and_test(w->set))
		wake_up_atomic_t(w->set);

	return !test_bit(STOP, &w->flags);
}

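/*
 * Body of each waiter kthread: once released from the barrier, add an
 * intel_wait for the assigned seqno to the engine's breadcrumb tree and
 * sleep until that seqno is passed (or STOP is requested), then remove
 * the wait and park again for the next pass.
 */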
static int igt_wakeup_thread(void *arg)
{
	struct igt_wakeup *w = arg;
	struct intel_wait wait;

	while (wait_for_ready(w)) {
		GEM_BUG_ON(kthread_should_stop());

		intel_wait_init(&wait, w->seqno);
		intel_engine_add_wait(w->engine, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
					      w->seqno))
				break;

			if (test_bit(STOP, &w->flags)) /* emergency escape */
				break;

			schedule();
		}
		intel_engine_remove_wait(w->engine, &wait);
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

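/*
 * Release every parked waiter by clearing @ready, then wait for each to
 * acknowledge through @set before re-arming @ready and @done for the next
 * pass. On return, all @count threads have left the barrier.
 */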
static void igt_wake_all_sync(atomic_t *ready,
			      atomic_t *set,
			      atomic_t *done,
			      wait_queue_head_t *wq,
			      int count)
{
	atomic_set(set, count);
	atomic_set(ready, 0);
	wake_up_all(wq);

	wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
	atomic_set(ready, count);
	atomic_set(done, count);
}

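/*
 * Stress the breadcrumb logic: spawn many threads, each waiting upon a
 * random seqno, advance the mock seqno in steps and check that every
 * waiter is woken and that the rbtree drains completely.
 */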
static int igt_wakeup(void *arg)
{
	I915_RND_STATE(prng);
	const int state = TASK_UNINTERRUPTIBLE;
	struct intel_engine_cs *engine = arg;
	struct igt_wakeup *waiters;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	const int count = 4096;
	const u32 max_seqno = count / 4;
	atomic_t ready, set, done;
	int err = -ENOMEM;
	int n, step;

	mock_engine_reset(engine);

	waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
	if (!waiters)
		goto out_engines;

	/* Create a large number of threads, each waiting on a random seqno.
	 * Multiple waiters will be waiting for the same seqno.
	 */
	atomic_set(&ready, count);
	for (n = 0; n < count; n++) {
		waiters[n].wq = &wq;
		waiters[n].ready = &ready;
		waiters[n].set = &set;
		waiters[n].done = &done;
		waiters[n].engine = engine;
		waiters[n].flags = BIT(IDLE);

		waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
					     "i915/igt:%d", n);
		if (IS_ERR(waiters[n].tsk))
			goto out_waiters;

		get_task_struct(waiters[n].tsk);
	}

	for (step = 1; step <= max_seqno; step <<= 1) {
		u32 seqno;

		/* The waiter threads start paused as we assign them a random
		 * seqno and reset the engine. Once the engine is reset,
		 * we signal that the threads may begin their wait upon their
		 * seqno.
		 */
		for (n = 0; n < count; n++) {
			GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
			waiters[n].seqno =
				1 + prandom_u32_state(&prng) % max_seqno;
		}
		mock_seqno_advance(engine, 0);
		igt_wake_all_sync(&ready, &set, &done, &wq, count);

		/* Simulate the GPU doing chunks of work, with one or more
		 * seqno appearing to finish at the same time. A random number
		 * of threads will be waiting upon the update and hopefully be
		 * woken.
		 */
		for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
			usleep_range(50, 500);
			mock_seqno_advance(engine, seqno);
		}
		GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);

		/* With the seqno now beyond any of the waiting threads, they
		 * should all be woken, see that they are complete and signal
		 * that they are ready for the next test. We wait until all
		 * threads are complete and waiting for us (i.e. not a seqno).
		 */
		err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
		if (err) {
			pr_err("Timed out waiting for %d remaining waiters\n",
			       atomic_read(&done));
			break;
		}

		err = check_rbtree_empty(engine);
		if (err)
			break;
	}

out_waiters:
	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		set_bit(STOP, &waiters[n].flags);
	}
	mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
	igt_wake_all_sync(&ready, &set, &done, &wq, n);

	for (n = 0; n < count; n++) {
		if (IS_ERR(waiters[n].tsk))
			break;

		kthread_stop(waiters[n].tsk);
		put_task_struct(waiters[n].tsk);
	}

	drm_free_large(waiters);
out_engines:
	mock_engine_flush(engine);
	return err;
}

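/*
 * Subtest entry point; each test runs against a mock engine, so no real
 * hardware is required.
 */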
int intel_breadcrumbs_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_random_insert_remove),
		SUBTEST(igt_insert_complete),
		SUBTEST(igt_wakeup),
	};
	struct intel_engine_cs *engine;
	int err;
