drm/i915: remove LP_RING&friends from modeset code
The LP refers to 'low priority', as opposed to the high-priority
ring on gen2/3. So let's constrain its use to the code of that era.

Unfortunately we can't yet completely remove the associated
macros from the common headers and shove them into i915_dma.c
next to the other dri1 legacy support code; a few cleanups are
still missing for that.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Daniel Vetter committed May 3, 2012
1 parent 64c43c3 commit 6d90c95
Showing 3 changed files with 87 additions and 76 deletions.
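
The mechanical change is the same in every hunk below: each function gains an explicit pointer to the render ring, and the implicit LP_RING emission macros are replaced by the explicit intel_ring_* interface. For orientation, here is a minimal sketch of the before/after shape, distilled from the gen2 flip path below; it is not standalone code and assumes the usual i915 headers plus the dev_priv, intel_crtc, ret and err_unpin locals visible in the hunks.

/*
 * Old dri1-era style (what the hunks remove): the macros implicitly
 * target the single "low priority" ring hanging off dev_priv:
 *
 *      ret = BEGIN_LP_RING(6);
 *      if (ret)
 *              goto err_unpin;
 *      OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 *      ... four more dwords ...
 *      ADVANCE_LP_RING();
 *
 * New style (what the hunks add): name the ring explicitly and emit
 * through the intel_ring_buffer interface.
 */
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

ret = intel_ring_begin(ring, 6);        /* reserve space for 6 dwords */
if (ret)
        goto err_unpin;
intel_ring_emit(ring, MI_DISPLAY_FLIP |
                MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
/* ... four more dwords via intel_ring_emit() ... */
intel_ring_advance(ring);

The same begin/emit/advance sequence replaces BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING in intel_overlay.c and intel_pm.c as well; only the dword counts and the emitted commands differ.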
78 changes: 41 additions & 37 deletions drivers/gpu/drm/i915/intel_display.c
@@ -5749,16 +5749,17 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;

ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;

/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

ret = BEGIN_LP_RING(6);
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;

@@ -5769,14 +5770,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(0); /* aux display base address, unused */
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_ring_advance(ring);
return 0;

err_unpin:
@@ -5794,32 +5795,33 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;

ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;

/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

ret = BEGIN_LP_RING(6);
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;

if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);

ADVANCE_LP_RING();
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + offset);
intel_ring_emit(ring, MI_NOOP);

intel_ring_advance(ring);
return 0;

err_unpin:
@@ -5836,33 +5838,34 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;

ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;

ret = BEGIN_LP_RING(4);
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;

/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset | obj->tiling_mode);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);

/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
ADVANCE_LP_RING();
intel_ring_emit(ring, pf | pipesrc);
intel_ring_advance(ring);
return 0;

err_unpin:
@@ -5878,26 +5881,27 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;

ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;

ret = BEGIN_LP_RING(4);
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;

OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitches[0] | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, obj->gtt_offset);

pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
ADVANCE_LP_RING();
intel_ring_emit(ring, pf | pipesrc);
intel_ring_advance(ring);
return 0;

err_unpin:
58 changes: 32 additions & 26 deletions drivers/gpu/drm/i915/intel_overlay.c
@@ -215,17 +215,18 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;

BUG_ON(overlay->last_flip_req);
ret = i915_add_request(LP_RING(dev_priv), NULL, request);
ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}

ret = BEGIN_LP_RING(4);
ret = intel_ring_begin(ring, 4);
if (ret) {
kfree(request);
goto out;
}

OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);

ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -351,16 +354,16 @@
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);

ret = BEGIN_LP_RING(2);
ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);

ret = i915_add_request(LP_RING(dev_priv), NULL, request);
ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
int ret;
@@ -417,20 +421,20 @@
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;

ret = BEGIN_LP_RING(6);
ret = intel_ring_begin(ring, 6);
if (ret) {
kfree(request);
return ret;
}
/* wait for overlay to go idle */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_advance(ring);

return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
@@ -442,12 +446,13 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;

if (overlay->last_flip_req == 0)
return 0;

ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;

/* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;

ret = BEGIN_LP_RING(2);
ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}

OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);

ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
27 changes: 14 additions & 13 deletions drivers/gpu/drm/i915/intel_pm.c
@@ -2436,6 +2436,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;

/* rc6 disabled by default due to repeated reports of hanging during
@@ -2455,31 +2456,31 @@ void ironlake_enable_rc6(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
ret = BEGIN_LP_RING(6);
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
return;
}

OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
OUT_RING(MI_SET_CONTEXT);
OUT_RING(dev_priv->renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
OUT_RING(MI_SUSPEND_FLUSH);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
intel_ring_emit(ring, MI_SUSPEND_FLUSH);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_FLUSH);
intel_ring_advance(ring);

/*
* Wait for the command parser to advance past MI_SET_CONTEXT. The HW
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
ret = intel_wait_ring_idle(LP_RING(dev_priv));
ret = intel_wait_ring_idle(ring);
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);