Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 357850
b: refs/heads/master
c: ca9c46c
h: refs/heads/master
v: v3
  • Loading branch information
Ville Syrjälä authored and Daniel Vetter committed Nov 29, 2012
1 parent 87c091c commit 72d92be
Show file tree
Hide file tree
Showing 3 changed files with 1 addition and 50 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: f930ddd0583c1a9e68d80a27d4e5077e795007b1
refs/heads/master: ca9c46c5c77987acf1bf7137bf85e9221bc459ba
42 changes: 0 additions & 42 deletions trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -601,45 +601,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
return ret;
}

/*
 * i915_gem_execbuffer_wait_for_flips - stall the ring on pending page flips
 * @ring: the ring buffer the batch will execute on
 * @flips: bitmask of planes with a pending flip (bit N set = plane N pending)
 *
 * For every plane with a pending flip, emits an MI_WAIT_FOR_EVENT command
 * into the ring so the GPU itself waits for the flip to complete before the
 * batch runs, instead of blocking the CPU.
 *
 * Returns 0 on success, or the negative error code propagated from
 * intel_ring_begin() if ring space could not be reserved.
 */
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
	u32 plane, flip_mask;
	int ret;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */

	/* Loop terminates once all set bits of @flips have been shifted out. */
	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		/* NOTE(review): only two wait masks exist here, so any
		 * nonzero plane index maps to plane B — assumes at most two
		 * display planes on this hardware; confirm against callers. */
		if (plane)
			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
		else
			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

		/* Reserve two dwords: the WAIT command plus a NOOP pad. */
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
uint32_t flips = 0;
int ret;

list_for_each_entry(obj, objects, exec_list) {
Expand All @@ -650,18 +617,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);

if (obj->base.pending_write_domain)
flips |= atomic_read(&obj->pending_flip);

flush_domains |= obj->base.write_domain;
}

if (flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}

if (flush_domains & I915_GEM_DOMAIN_CPU)
i915_gem_chipset_flush(ring->dev);

Expand Down
7 changes: 0 additions & 7 deletions trunk/drivers/gpu/drm/i915/intel_display.c
Original file line number Diff line number Diff line change
Expand Up @@ -6945,8 +6945,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,

obj = work->old_fb_obj;

atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
wake_up(&dev_priv->pending_flip_queue);

queue_work(dev_priv->wq, &work->work);
Expand Down Expand Up @@ -7292,10 +7290,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

work->enable_stall_check = true;

/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
atomic_inc(&intel_crtc->unpin_work_count);

ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
Expand All @@ -7312,7 +7306,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
Expand Down

0 comments on commit 72d92be

Please sign in to comment.