From a55ed9eb18865a8ec52781e956247e75910c382e Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 20 Jul 2012 18:02:28 +0100
Subject: [PATCH]

--- yaml ---
r: 329267
b: refs/heads/master
c: 6c6cf5aa9c583478b19e23149feaa92d01fb8c2d
h: refs/heads/master
i:
  329265: b5be4797f99aec80398f0d1c00bec3a206b7fcec
  329263: a603c07fc680f90f98bf7515c0c5763b75c38b41
v: v3
---
 [refs]                                        |  2 +-
 trunk/drivers/gpu/drm/i915/intel_ringbuffer.c | 33 +++++++++++--------
 2 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/[refs] b/[refs]
index 9d150ab5e009..9343f4eadc62 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ab3951eb74e7c33a2f5b7b64d72e82f1eea61571
+refs/heads/master: 6c6cf5aa9c583478b19e23149feaa92d01fb8c2d
diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
index c58f1b91d08b..8733da529edf 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -214,15 +214,8 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	struct pipe_control *pc = ring->private;
-	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;
 
-	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
-	if (ret)
-		return ret;
-
 	/* Just flush everything.  Experiments have shown that reducing the
 	 * number of bits based on the write domains has little performance
 	 * impact.
@@ -242,21 +235,33 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	if (flush_domains)
 		flags |= PIPE_CONTROL_CS_STALL;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* lower dword */
-	intel_ring_emit(ring, 0); /* uppwer dword */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
 	return 0;
 }
 
+static int
+gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
+			   u32 invalidate_domains, u32 flush_domains)
+{
+	int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	ret = intel_emit_post_sync_nonzero_flush(ring);
+	if (ret)
+		return ret;
+
+	return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -1371,6 +1376,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush__wa;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
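
Note (not part of the patch): the snippet below is a minimal, standalone C sketch of the structure this change introduces: the render ring's flush hook is bound to a gen6-only wrapper that runs the post-sync non-zero workaround before delegating to the common PIPE_CONTROL flush, while gen7+ keeps the plain flush. All type and function names here are simplified stand-ins for illustration, not the real i915 API.

/* Standalone sketch of the gen6-only workaround wrapper pattern. */
#include <stdio.h>

struct ring {
	int gen;
	int (*flush)(struct ring *ring, unsigned invalidate, unsigned flush);
};

static int emit_post_sync_nonzero_flush(struct ring *ring)
{
	/* Stand-in for the SNB-only post-sync non-zero workaround. */
	printf("gen%d: post-sync non-zero workaround\n", ring->gen);
	return 0;
}

static int render_ring_flush(struct ring *ring, unsigned invalidate, unsigned flush)
{
	/* Stand-in for the common PIPE_CONTROL flush. */
	printf("gen%d: PIPE_CONTROL flush (inv=%#x, flush=%#x)\n",
	       ring->gen, invalidate, flush);
	return 0;
}

static int render_ring_flush__wa(struct ring *ring, unsigned invalidate, unsigned flush)
{
	/* Apply the workaround first, then delegate to the common flush. */
	int ret = emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;
	return render_ring_flush(ring, invalidate, flush);
}

static void init_render_ring(struct ring *ring)
{
	ring->flush = render_ring_flush;
	if (ring->gen == 6)	/* only SNB needs the workaround wrapper */
		ring->flush = render_ring_flush__wa;
}

int main(void)
{
	struct ring snb = { .gen = 6 }, ivb = { .gen = 7 };

	init_render_ring(&snb);
	init_render_ring(&ivb);
	snb.flush(&snb, 0x1, 0x2);	/* workaround + flush */
	ivb.flush(&ivb, 0x1, 0x2);	/* plain flush */
	return 0;
}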