Skip to content

Commit

Permalink
drm/i915: Rearrange code to only have a single method for waiting upon the ring
Browse files Browse the repository at this point in the history

Replace the wait for the ring to be clear with the more common wait for
the ring to be idle. The principle advantage is one less exported
intel_ring_wait function, and the removal of a hardcoded value.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
  • Loading branch information
Chris Wilson authored and Daniel Vetter committed Nov 29, 2012
1 parent b662a06 commit 3e96050
Show file tree
Hide file tree
Showing 5 changed files with 58 additions and 61 deletions.
4 changes: 1 addition & 3 deletions drivers/gpu/drm/i915/i915_dma.c
Original file line number Diff line number Diff line change
Expand Up @@ -592,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)

/*
 * i915_quiescent - wait for the legacy (DRI1) ring to drain completely.
 *
 * Drops any stale software state about the ring position
 * (i915_kernel_lost_context) and then waits for all outstanding requests
 * on the low-priority render ring to retire.
 *
 * Returns 0 on success or a negative errno from the wait.
 *
 * Fix: the scraped diff left both the pre-commit call
 * (intel_wait_ring_idle) and its replacement in place, producing two
 * consecutive return statements — the second was unreachable dead code.
 * Keep only the post-commit intel_ring_idle() call.
 */
static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
Expand Down
25 changes: 1 addition & 24 deletions drivers/gpu/drm/i915/i915_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -2480,29 +2480,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
u32 seqno;
int ret;

/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_request) {
ret = i915_add_request(ring, NULL, NULL);
if (ret)
return ret;
}

/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))
return 0;

seqno = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
list)->seqno;

return i915_wait_seqno(ring, seqno);
}

int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
Expand All @@ -2515,7 +2492,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;

ret = i915_ring_idle(ring);
ret = intel_ring_idle(ring);
if (ret)
return ret;
}
Expand Down
8 changes: 7 additions & 1 deletion drivers/gpu/drm/i915/intel_pm.c
Original file line number Diff line number Diff line change
Expand Up @@ -2653,6 +2653,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
bool was_interruptible;
int ret;

/* rc6 disabled by default due to repeated reports of hanging during
Expand All @@ -2667,13 +2668,17 @@ static void ironlake_enable_rc6(struct drm_device *dev)
if (ret)
return;

was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;

/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
dev_priv->mm.interruptible = was_interruptible;
return;
}

Expand All @@ -2694,7 +2699,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
ret = intel_wait_ring_idle(ring);
ret = intel_ring_idle(ring);
dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
Expand Down
73 changes: 48 additions & 25 deletions drivers/gpu/drm/i915/intel_ringbuffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -1175,7 +1175,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)

/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_wait_ring_idle(ring);
ret = intel_ring_idle(ring);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
Expand All @@ -1194,28 +1194,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
uint32_t __iomem *virt;
int rem = ring->size - ring->tail;

if (ring->space < rem) {
int ret = intel_wait_ring_buffer(ring, rem);
if (ret)
return ret;
}

virt = ring->virtual_start + ring->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);

ring->tail = 0;
ring->space = ring_space(ring);

return 0;
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
Expand Down Expand Up @@ -1284,7 +1262,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
Expand Down Expand Up @@ -1327,6 +1305,51 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return -EBUSY;
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
uint32_t __iomem *virt;
int rem = ring->size - ring->tail;

if (ring->space < rem) {
int ret = ring_wait_for_space(ring, rem);
if (ret)
return ret;
}

virt = ring->virtual_start + ring->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);

ring->tail = 0;
ring->space = ring_space(ring);

return 0;
}

/*
 * intel_ring_idle - wait for all outstanding requests on @ring to retire.
 *
 * Emits a real request for any work still batched under the outstanding
 * lazy request, then waits for the seqno of the last queued request.
 * Exported replacement for the old intel_wait_ring_idle() helper.
 *
 * Returns 0 once the ring is idle, or a negative errno on failure.
 */
int intel_ring_idle(struct intel_ring_buffer *ring)
{
	int ret;

	/* Make sure everything pending has an associated request. */
	if (ring->outstanding_lazy_request) {
		ret = i915_add_request(ring, NULL, NULL);
		if (ret)
			return ret;
	}

	/* An empty request list means the ring is already quiescent. */
	if (list_empty(&ring->request_list))
		return 0;

	/* Waiting on the most recent request covers all earlier ones. */
	return i915_wait_seqno(ring,
			       list_entry(ring->request_list.prev,
					  struct drm_i915_gem_request,
					  list)->seqno);
}

static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
Expand Down Expand Up @@ -1359,7 +1382,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
}

if (unlikely(ring->space < n)) {
ret = intel_wait_ring_buffer(ring, n);
ret = ring_wait_for_space(ring, n);
if (unlikely(ret))
return ret;
}
Expand Down
9 changes: 1 addition & 8 deletions drivers/gpu/drm/i915/intel_ringbuffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -187,22 +187,15 @@ intel_read_status_page(struct intel_ring_buffer *ring,

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
/*
 * Wait until the ring is (almost) completely empty.
 *
 * Implemented as a wait for all but 8 bytes of the ring to be free.
 * NOTE(review): the "- 8" is a hardcoded slack value — presumably so the
 * tail never catches up to the head exactly (which would read as an empty
 * ring); confirm against the ring bookkeeping in intel_ringbuffer.c.
 * This is the magic number the replacing commit removes in favour of
 * intel_ring_idle().
 */
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
return intel_wait_ring_buffer(ring, ring->size - 8);
}

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

/*
 * Emit one dword of command/data into the ring at the current tail.
 *
 * Writes @data through the iomem mapping of the ring and advances the
 * software tail by 4 bytes. Callers must have reserved space first via
 * intel_ring_begin(); the hardware tail pointer is only updated later by
 * intel_ring_advance().
 */
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);

int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
Expand Down

0 comments on commit 3e96050

Please sign in to comment.