---
r: 329360
b: refs/heads/master
c: 7788a76
h: refs/heads/master
v: v3
Chris Wilson authored and Daniel Vetter committed Aug 24, 2012
1 parent b274d3b commit 9ea3d8e
Showing 2 changed files with 46 additions and 70 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 050ee91f128bd767b1413383fea6c973aa464c54
+refs/heads/master: 7788a765205f63abcb8645c16c85a968bd578f4f
114 changes: 45 additions & 69 deletions trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -331,7 +331,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
 need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -341,9 +342,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
 }
 
 static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
-		     struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+				   struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
@@ -359,11 +361,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
-				goto err_unpin;
+				return ret;
 
 			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -372,24 +376,46 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 		}
 	}
 
+	/* Ensure ppgtt mapping exists if needed */
+	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+				       obj, obj->cache_level);
+
+		obj->has_aliasing_ppgtt_mapping = 1;
+	}
+
 	entry->offset = obj->gtt_offset;
 	return 0;
+}
 
-err_unpin:
-	i915_gem_object_unpin(obj);
-	return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+
+	if (!obj->gtt_space)
+		return;
+
+	entry = obj->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
 			    struct list_head *objects)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret, retry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
 
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {
@@ -427,12 +453,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	 * 2. Bind new objects.
	 * 3. Decrement pin count.
	 *
-	 * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoid unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
-		ret = 0;
+		int ret = 0;
 
		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
@@ -452,7 +478,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = pin_and_fence_object(obj, ring);
+				ret = i915_gem_execbuffer_reserve_object(obj, ring);
 			if (ret)
 				goto err;
 		}
@@ -462,46 +488,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			if (obj->gtt_space)
 				continue;
 
-			ret = pin_and_fence_object(obj, ring);
-			if (ret) {
-				int ret_ignore;
-
-				/* This can potentially raise a harmless
-				 * -EINVAL if we failed to bind in the above
-				 * call. It cannot raise -EINTR since we know
-				 * that the bo is freshly bound and so will
-				 * not need to be flushed or waited upon.
-				 */
-				ret_ignore = i915_gem_object_unbind(obj);
-				(void)ret_ignore;
-				WARN_ON(obj->gtt_space);
-				break;
-			}
+			ret = i915_gem_execbuffer_reserve_object(obj, ring);
+			if (ret)
+				goto err;
 		}
 
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry;
-
-			if (!obj->gtt_space)
-				continue;
-
-			entry = obj->exec_entry;
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				i915_gem_object_unpin_fence(obj);
-				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-			}
-
-			i915_gem_object_unpin(obj);
-
-			/* ... and ensure ppgtt mapping exist if needed. */
-			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-						       obj, obj->cache_level);
-
-				obj->has_aliasing_ppgtt_mapping = 1;
-			}
-		}
+err:		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list)
+			i915_gem_execbuffer_unreserve_object(obj);
 
 		if (ret != -ENOSPC || retry++)
 			return ret;
@@ -510,24 +504,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		if (ret)
 			return ret;
 	} while (1);
-
-err:
-	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry;
-
-		if (!obj->gtt_space)
-			continue;
-
-		entry = obj->exec_entry;
-		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-			i915_gem_object_unpin_fence(obj);
-			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-		}
-
-		i915_gem_object_unpin(obj);
-	}
-
-	return ret;
 }
 
 static int
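
Taken together, the refactor funnels every cleanup path through a single helper: reservation records what it acquired (pin, fence) in the entry flags, and unreservation releases exactly what those flags record, so it can be called unconditionally on success and error paths alike. Below is a minimal, self-contained sketch of that pattern; it is illustrative only, with simplified stand-in types and helpers rather than the actual i915 structures.

/* exec_reserve_sketch.c: illustrative only, not the i915 code. */
#include <stdbool.h>
#include <stdio.h>

/* Mirror the commit's idea: track what was acquired in per-object flags. */
#define HAS_PIN   (1u << 31)
#define HAS_FENCE (1u << 30)

struct object {
	unsigned int flags;
	bool bound;		/* stand-in for obj->gtt_space != NULL */
};

/* Acquire resources and record each acquisition in the flags. */
static int reserve_object(struct object *obj, bool need_fence)
{
	obj->bound = true;
	obj->flags |= HAS_PIN;
	if (need_fence)
		obj->flags |= HAS_FENCE;
	return 0;
}

/*
 * Undo exactly what the flags say was taken. Because it consults the
 * flags, this is safe to call unconditionally on success and error
 * paths alike, which is the property the commit uses to collapse the
 * two duplicated cleanup loops into one.
 */
static void unreserve_object(struct object *obj)
{
	if (!obj->bound)
		return;
	if (obj->flags & HAS_FENCE)
		puts("release fence");
	if (obj->flags & HAS_PIN)
		puts("release pin");
	obj->flags &= ~(HAS_FENCE | HAS_PIN);
}

int main(void)
{
	struct object obj = { 0 };

	if (reserve_object(&obj, true) != 0)
		return 1;
	unreserve_object(&obj);	/* releases fence, then pin */
	unreserve_object(&obj);	/* flags already cleared: harmless no-op */
	return 0;
}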
