drm/radeon: Move pinning the BO back to radeon_crtc_page_flip()
As well as enabling the vblank interrupt. These shouldn't take any
significant amount of time, but at least pinning the BO has actually been
seen to fail in practice before, in which case we need to let userspace
know about it.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Michel Dänzer authored and Alex Deucher committed Jul 17, 2014
1 parent f53f81b commit c60381b
Showing 2 changed files with 93 additions and 91 deletions.
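The change is easier to follow with the pattern in mind: pinning the new buffer (which can genuinely fail, for example when VRAM is exhausted) and drm_vblank_get() are done synchronously in the page-flip ioctl so that any error code reaches userspace, and only the cheap MMIO flip is left to the asynchronous worker. Below is a small, self-contained userspace C sketch of that pattern; it is not the driver code, and the names (pin_buffer, flip_work_func, page_flip, vram_full) are illustrative stand-ins:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the flip work item; only the pinned GPU address is handed
 * to the asynchronous part. */
struct flip_work {
	unsigned long base;
};

/* Stand-in for the pin step: pinning can fail, e.g. when VRAM is full. */
static int pin_buffer(bool vram_full, unsigned long *base)
{
	if (vram_full)
		return -ENOMEM;
	*base = 0x100000;
	return 0;
}

/* Stand-in for the worker: it only programs the already pinned address,
 * so nothing left here can fail. */
static void flip_work_func(const struct flip_work *work)
{
	printf("mmio flip to base 0x%lx\n", work->base);
}

/* Stand-in for the page-flip ioctl: pin synchronously and return any
 * error to the caller, then hand the cheap flip to the worker. */
static int page_flip(struct flip_work *work, bool vram_full)
{
	int r = pin_buffer(vram_full, &work->base);

	if (r)
		return r;	/* the caller sees the failure immediately */

	flip_work_func(work);	/* the driver queues this on a workqueue */
	return 0;
}

int main(void)
{
	struct flip_work work;

	printf("flip: %d\n", page_flip(&work, false));
	printf("flip with full VRAM: %d\n", page_flip(&work, true));
	return 0;
}

With pinning in the synchronous path, the second call returns -ENOMEM to its caller instead of the failure being swallowed inside a workqueue, which is the user-visible behaviour the commit message asks for.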
drivers/gpu/drm/radeon/radeon.h (3 changes: 1 addition & 2 deletions)
@@ -684,10 +684,9 @@ struct radeon_flip_work {
struct work_struct unpin_work;
struct radeon_device *rdev;
int crtc_id;
- struct drm_framebuffer *fb;
+ uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
- struct radeon_bo *new_rbo;
struct radeon_fence *fence;
};

drivers/gpu/drm/radeon/radeon_display.c (181 changes: 92 additions & 89 deletions)
@@ -386,11 +386,6 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];

struct drm_crtc *crtc = &radeon_crtc->base;
- struct drm_framebuffer *fb = work->fb;
-
- uint32_t tiling_flags, pitch_pixels;
- uint64_t base;
-
unsigned long flags;
int r;

@@ -411,26 +406,94 @@ static void radeon_flip_work_func(struct work_struct *__work)
radeon_fence_unref(&work->fence);
}

+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ /* set the proper interrupt */
+ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+ radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ up_read(&rdev->exclusive_lock);
+
+ return;
+
+ cleanup:
+ drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ radeon_fence_unref(&work->fence);
+ kfree(work);
+ up_read(&rdev->exclusive_lock);
+ }
+
+ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_flip_work *work;
+ struct radeon_bo *new_rbo;
+ uint32_t tiling_flags, pitch_pixels;
+ uint64_t base;
+ unsigned long flags;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ INIT_WORK(&work->flip_work, radeon_flip_work_func);
+ INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->event = event;
+
+ /* schedule unpin of the old buffer */
+ old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ obj = old_radeon_fb->obj;
+
+ /* take a reference to the old object */
+ drm_gem_object_reference(obj);
+ work->old_rbo = gem_to_radeon_bo(obj);
+
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ obj = new_radeon_fb->obj;
+ new_rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&new_rbo->tbo.bdev->fence_lock);
+ if (new_rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+ spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
/* pin the new buffer */
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, work->new_rbo);
DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
work->old_rbo, new_rbo);

- r = radeon_bo_reserve(work->new_rbo, false);
+ r = radeon_bo_reserve(new_rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
goto cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(new_rbo);

if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -467,6 +530,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
}
base &= ~7;
}
+ work->base = base;

r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
if (r) {
@@ -477,100 +541,39 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);

- /* set the proper interrupt */
- radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+ if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ r = -EBUSY;
+ goto pflip_cleanup;
+ }
+ radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+ radeon_crtc->flip_work = work;

- /* do the flip (mmio) */
- radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+ /* update crtc fb */
+ crtc->primary->fb = fb;

- radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- up_read(&rdev->exclusive_lock);

- return;
+ queue_work(radeon_crtc->flip_queue, &work->flip_work);
+ return 0;

pflip_cleanup:
- if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+ if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
- if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+ if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
- radeon_bo_unreserve(work->new_rbo);
+ radeon_bo_unreserve(new_rbo);

cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
- up_read(&rdev->exclusive_lock);
- }

- static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
- {
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
- struct drm_gem_object *obj;
- struct radeon_flip_work *work;
- unsigned long flags;
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- INIT_WORK(&work->flip_work, radeon_flip_work_func);
- INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
- work->rdev = rdev;
- work->crtc_id = radeon_crtc->crtc_id;
- work->fb = fb;
- work->event = event;
-
- /* schedule unpin of the old buffer */
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- obj = old_radeon_fb->obj;
-
- /* take a reference to the old object */
- drm_gem_object_reference(obj);
- work->old_rbo = gem_to_radeon_bo(obj);
-
- new_radeon_fb = to_radeon_framebuffer(fb);
- obj = new_radeon_fb->obj;
- work->new_rbo = gem_to_radeon_bo(obj);
-
- spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
- if (work->new_rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
- spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- radeon_fence_unref(&work->fence);
- kfree(work);
- return -EBUSY;
- }
- radeon_crtc->flip_status = RADEON_FLIP_PENDING;
- radeon_crtc->flip_work = work;
-
- /* update crtc fb */
- crtc->primary->fb = fb;
-
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
- return 0;
+ return r;
}

static int
