Commit 747fd1c

---
r: 318611
b: refs/heads/master
c: cc889e0
h: refs/heads/master
i:
  318609: e11fbe0
  318607: 9447ee0
v: v3
Daniel Vetter committed Jun 20, 2012
1 parent 67c6353 commit 747fd1c
Showing 4 changed files with 34 additions and 46 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8e88a2bd5987178d16d53686197404e149e996d9
+refs/heads/master: cc889e0f6ce6a63c62db17d702ecfed86d58083f
25 changes: 20 additions & 5 deletions trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -1568,6 +1568,21 @@ i915_add_request(struct intel_ring_buffer *ring,
 	int was_empty;
 	int ret;
 
+	/*
+	 * Emit any outstanding flushes - execbuf can fail to emit the flush
+	 * after having emitted the batchbuffer command. Hence we need to fix
+	 * things up similar to emitting the lazy request. The difference here
+	 * is that the flush _must_ happen before the next request, no matter
+	 * what.
+	 */
+	if (ring->gpu_caches_dirty) {
+		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+
+		ring->gpu_caches_dirty = false;
+	}
+
 	BUG_ON(request == NULL);
 	seqno = i915_gem_next_request_seqno(ring);

@@ -1613,6 +1628,9 @@ i915_add_request(struct intel_ring_buffer *ring,
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
 	}
+
+	WARN_ON(!list_empty(&ring->gpu_write_list));
+
 	return 0;
 }

@@ -1827,14 +1845,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	 */
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-		if (!list_empty(&ring->gpu_write_list)) {
+		if (ring->gpu_caches_dirty) {
 			struct drm_i915_gem_request *request;
-			int ret;
 
-			ret = i915_gem_flush_ring(ring,
-						  0, I915_GEM_GPU_DOMAINS);
 			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (ret || request == NULL ||
+			if (request == NULL ||
 			    i915_add_request(ring, NULL, request))
 				kfree(request);
 		}
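The comment added to i915_add_request() above is the heart of the change: execbuffer no longer flushes GPU caches itself, it only marks the ring dirty, and the flush is emitted (at the latest) just before the next request is written. A condensed, standalone sketch of that handshake follows - the types and helpers are simplified stand-ins, not the driver's actual intel_ring_buffer / i915_gem_flush_ring() / i915_add_request() call chain:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	bool gpu_caches_dirty;	/* stand-in for intel_ring_buffer.gpu_caches_dirty */
};

/* Stand-in for the real flush; returns 0 on success, negative errno on failure. */
static int flush_ring(struct ring *ring)
{
	(void)ring;
	printf("flush emitted\n");
	return 0;
}

/* Mirrors the new ordering: emit any pending flush first, then the request. */
static int add_request(struct ring *ring)
{
	if (ring->gpu_caches_dirty) {
		int ret = flush_ring(ring);
		if (ret)
			return ret;
		ring->gpu_caches_dirty = false;
	}
	printf("request emitted\n");
	return 0;
}

int main(void)
{
	struct ring ring = { .gpu_caches_dirty = false };

	ring.gpu_caches_dirty = true;	/* execbuffer path: defer the flush */
	add_request(&ring);		/* emits the flush, then the request */
	add_request(&ring);		/* caches already clean: request only */
	return 0;
}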
52 changes: 12 additions & 40 deletions trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -810,33 +810,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	return ret;
 }
 
-static int
+static void
 i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t invalidate_domains,
-			  uint32_t flush_domains,
-			  uint32_t flush_rings)
+			  uint32_t flush_domains)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i, ret;
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
-
-	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i)) {
-				ret = i915_gem_flush_ring(&dev_priv->ring[i],
-							  invalidate_domains,
-							  flush_domains);
-				if (ret)
-					return ret;
-			}
-	}
-
-	return 0;
 }
 
 static int
@@ -885,12 +868,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 
 	if (cd.invalidate_domains | cd.flush_domains) {
-		ret = i915_gem_execbuffer_flush(ring->dev,
-						cd.invalidate_domains,
-						cd.flush_domains,
-						cd.flush_rings);
-		if (ret)
-			return ret;
+		i915_gem_execbuffer_flush(ring->dev,
+					  cd.invalidate_domains,
+					  cd.flush_domains);
 	}
 
 	if (cd.flips) {
@@ -905,6 +885,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 			return ret;
 	}
 
+	/* Unconditionally invalidate gpu caches. */
+	ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, 0);
+	if (ret)
+		return ret;
+
 	return 0;
 }

@@ -983,26 +968,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_request *request;
-	u32 invalidate;
 
-	/*
-	 * Ensure that the commands in the batch buffer are
-	 * finished before the interrupt fires.
-	 *
-	 * The sampler always gets flushed on i965 (sigh).
-	 */
-	invalidate = I915_GEM_DOMAIN_COMMAND;
-	if (INTEL_INFO(dev)->gen >= 4)
-		invalidate |= I915_GEM_DOMAIN_SAMPLER;
-	if (ring->flush(ring, invalidate, 0)) {
-		i915_gem_next_request_seqno(ring);
-		return;
-	}
+	/* Unconditionally force add_request to emit a full flush. */
+	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL || i915_add_request(ring, file, request)) {
 		i915_gem_next_request_seqno(ring);
 		kfree(request);
 	}
 }
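Taken together, the execbuffer changes above leave one fixed ordering: GPU caches are invalidated unconditionally before the batch is emitted, and afterwards the ring is only marked dirty, so the request added next (or, failing that, the retire work handler shown in the i915_gem.c hunks) carries the write-back flush. A minimal sketch of that sequence, with hypothetical helper names standing in for the upstream functions:

#include <stdbool.h>

struct ring {
	bool gpu_caches_dirty;
};

/* Hypothetical stand-ins for the invalidate and batch-emission steps. */
static int invalidate_gpu_caches(struct ring *ring) { (void)ring; return 0; }
static int emit_batchbuffer(struct ring *ring) { (void)ring; return 0; }

static int do_execbuffer(struct ring *ring)
{
	int ret;

	/* move_to_gpu step: unconditionally invalidate gpu caches up front */
	ret = invalidate_gpu_caches(ring);
	if (ret)
		return ret;

	ret = emit_batchbuffer(ring);
	if (ret)
		return ret;

	/* retire_commands step: force the next add_request to emit a full flush */
	ring->gpu_caches_dirty = true;
	return 0;
}

int main(void)
{
	struct ring ring = { .gpu_caches_dirty = false };
	return do_execbuffer(&ring);
}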
1 change: 1 addition & 0 deletions trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -113,6 +113,7 @@ struct intel_ring_buffer {
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	u32 outstanding_lazy_request;
+	bool gpu_caches_dirty;
 
 	wait_queue_head_t irq_queue;

