drm/i915: Kill i915_gem_execbuffer_wait_for_flips()
As per Chris Wilson's suggestion, make
i915_gem_execbuffer_wait_for_flips() go away.

This was used to stall the GPU ring while there are pending
page flips involving the relevant BO, i.e. while the BO is still
being scanned out by the display controller.

The recommended alternative is to use the page flip events to
wait for the page flips to fully complete before reusing the BO
of the old front buffer. Or use more buffers.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Kristian Høgsberg <krh@bitplanet.net>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
[danvet: don't remove obj->pending_flips, still required due to
reorder patches.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Ville Syrjälä authored and Daniel Vetter committed Nov 29, 2012
1 parent f930ddd commit ca9c46c
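
The alternative the commit message recommends, waiting for the kernel's page-flip completion event before touching the old front buffer again, looks roughly like the libdrm sketch below. This is an illustrative sketch only, not part of the patch: fd, crtc_id and next_fb_id are assumed to come from the application's existing KMS setup.

#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Called by drmHandleEvent() once the flip has completed, i.e. the old
 * front buffer is no longer being scanned out and may be reused. */
static void page_flip_handler(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
{
	bool *flip_pending = user_data;

	*flip_pending = false;
}

/* Queue a flip to next_fb_id and block until the flip-complete event
 * arrives on the DRM fd. */
static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t next_fb_id)
{
	drmEventContext evctx = {
		.version = 2,
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	bool flip_pending = true;
	int ret;

	/* Ask KMS to flip and to deliver a completion event on fd. */
	ret = drmModePageFlip(fd, crtc_id, next_fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, &flip_pending);
	if (ret)
		return ret;

	/* Dispatch DRM events until our flip has been signalled. */
	while (flip_pending) {
		if (poll(&pfd, 1, -1) < 0)
			return -errno;
		drmHandleEvent(fd, &evctx);
	}

	return 0;
}

The other option mentioned above, using more buffers (e.g. triple buffering), sidesteps the wait entirely by never rendering into a buffer that may still be on the scanout.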
Showing 2 changed files with 0 additions and 49 deletions.
42 changes: 0 additions & 42 deletions drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -601,45 +601,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	return ret;
 }
 
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
-	u32 plane, flip_mask;
-	int ret;
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-
-	for (plane = 0; flips >> plane; plane++) {
-		if (((flips >> plane) & 1) == 0)
-			continue;
-
-		if (plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
-
-	return 0;
-}
-
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	uint32_t flush_domains = 0;
-	uint32_t flips = 0;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -650,18 +617,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj);
 
-		if (obj->base.pending_write_domain)
-			flips |= atomic_read(&obj->pending_flip);
-
 		flush_domains |= obj->base.write_domain;
 	}
 
-	if (flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-		if (ret)
-			return ret;
-	}
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		i915_gem_chipset_flush(ring->dev);
 
7 changes: 0 additions & 7 deletions drivers/gpu/drm/i915/intel_display.c
@@ -6945,8 +6945,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	obj = work->old_fb_obj;
 
-	atomic_clear_mask(1 << intel_crtc->plane,
-			  &obj->pending_flip.counter);
 	wake_up(&dev_priv->pending_flip_queue);
 
 	queue_work(dev_priv->wq, &work->work);
@@ -7292,10 +7290,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->enable_stall_check = true;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	atomic_inc(&intel_crtc->unpin_work_count);
 
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
@@ -7312,7 +7306,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
-	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
