Skip to content

Commit

Permalink
drm/i915: Refactor blocking waits
Browse files Browse the repository at this point in the history
Tidy up the for loops that handle waiting for read/write vs read-only
access.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-13-git-send-email-chris@chris-wilson.co.uk
  • Loading branch information
Chris Wilson committed Aug 4, 2016
1 parent d72d908 commit 8cac6f6
Showing 1 changed file with 75 additions and 83 deletions.
158 changes: 75 additions & 83 deletions drivers/gpu/drm/i915/i915_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -1339,6 +1339,23 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return ret;
}

/*
 * Retire the tracking for @req on @obj: if @req is still the most recent
 * request on one of the object's activity trackers (per-engine last_read,
 * or last_write), drop that tracker.  Then, unless a GPU reset is pending,
 * retire every request up to and including @req.
 *
 * Caller holds struct_mutex (the trackers are peeked under it).
 */
static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
			       struct drm_i915_gem_request *req)
{
	struct mutex *mutex = &obj->base.dev->struct_mutex;
	int engine_id = req->engine->id;

	/* Only clear a tracker if it still points at this very request. */
	if (i915_gem_active_peek(&obj->last_read[engine_id], mutex) == req)
		i915_gem_object_retire__read(obj, engine_id);
	else if (i915_gem_active_peek(&obj->last_write, mutex) == req)
		i915_gem_object_retire__write(obj);

	/* During reset the request lists are in flux; skip retiring then. */
	if (!i915_reset_in_progress(&req->i915->gpu_error))
		i915_gem_request_retire_upto(req);
}

/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
Expand All @@ -1349,39 +1366,34 @@ int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
struct drm_i915_gem_request *request;
struct reservation_object *resv;
int ret, i;
struct i915_gem_active *active;
unsigned long active_mask;
int idx, ret;

if (readonly) {
request = i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex);
if (request) {
ret = i915_wait_request(request);
if (ret)
return ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);

i = request->engine->id;
if (i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex) == request)
i915_gem_object_retire__read(obj, i);
else
i915_gem_object_retire__write(obj);
}
if (!readonly) {
active = obj->last_read;
active_mask = obj->active;
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
request = i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (!request)
continue;
active_mask = 1;
active = &obj->last_write;
}

ret = i915_wait_request(request);
if (ret)
return ret;
for_each_active(active_mask, idx) {
struct drm_i915_gem_request *request;

i915_gem_object_retire__read(obj, i);
}
GEM_BUG_ON(obj->active);
request = i915_gem_active_peek(&active[idx],
&obj->base.dev->struct_mutex);
if (!request)
continue;

ret = i915_wait_request(request);
if (ret)
return ret;

i915_gem_object_retire_request(obj, request);
}

resv = i915_gem_object_get_dmabuf_resv(obj);
Expand All @@ -1397,23 +1409,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
return 0;
}

/*
 * Retire the activity tracking that @obj holds for @req.
 *
 * If @req is still recorded as the object's last read on its engine,
 * retire the read tracker; otherwise, if it is still the object's last
 * write, retire the write tracker.  Finally, unless a GPU reset is in
 * progress, retire all requests up to and including @req.
 *
 * NOTE(review): the peeks take obj->base.dev->struct_mutex, so the
 * caller presumably holds struct_mutex — confirm at call sites.
 */
static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req)
{
	int idx = req->engine->id;

	/* Only retire a tracker that still refers to this exact request. */
	if (i915_gem_active_peek(&obj->last_read[idx],
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__read(obj, idx);
	else if (i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex) == req)
i915_gem_object_retire__write(obj);

	/* Request lists are unstable during reset; defer retiring then. */
	if (!i915_reset_in_progress(&req->i915->gpu_error))
i915_gem_request_retire_upto(req);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
* as the object state may change during this call.
*/
Expand All @@ -1425,34 +1420,31 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
struct i915_gem_active *active;
unsigned long active_mask;
int ret, i, n = 0;

BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);

if (!obj->active)
active_mask = obj->active;
if (!active_mask)
return 0;

if (readonly) {
struct drm_i915_gem_request *req;

req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req == NULL)
return 0;

requests[n++] = req;
if (!readonly) {
active = obj->last_read;
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
active_mask = 1;
active = &obj->last_write;
}

req = i915_gem_active_get(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req == NULL)
continue;
for_each_active(active_mask, i) {
struct drm_i915_gem_request *req;

req = i915_gem_active_get(&active[i],
&obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
}
}

mutex_unlock(&dev->struct_mutex);
Expand Down Expand Up @@ -2934,33 +2926,33 @@ int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *to)
{
const bool readonly = obj->base.pending_write_domain == 0;
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int ret, i, n;
struct i915_gem_active *active;
unsigned long active_mask;
int idx;

if (!obj->active)
return 0;
lockdep_assert_held(&obj->base.dev->struct_mutex);

n = 0;
if (readonly) {
struct drm_i915_gem_request *req;
active_mask = obj->active;
if (!active_mask)
return 0;

req = i915_gem_active_peek(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
if (obj->base.pending_write_domain) {
active = obj->last_read;
} else {
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;

req = i915_gem_active_peek(&obj->last_read[i],
&obj->base.dev->struct_mutex);
if (req)
requests[n++] = req;
}
active_mask = 1;
active = &obj->last_write;
}
for (i = 0; i < n; i++) {
ret = __i915_gem_object_sync(obj, to, requests[i]);

for_each_active(active_mask, idx) {
struct drm_i915_gem_request *request;
int ret;

request = i915_gem_active_peek(&active[idx],
&obj->base.dev->struct_mutex);
if (!request)
continue;

ret = __i915_gem_object_sync(obj, to, request);
if (ret)
return ret;
}
Expand Down

0 comments on commit 8cac6f6

Please sign in to comment.