drm/i915: hold dev->struct_mutex and DRM lock during vblank ring operations

To synchronize clip lists with the X server, the DRM lock must be held while
looking at drawable clip lists. To synchronize with other ring access, the
ring mutex must be held while inserting commands into the ring. Failing to
do the first resulted in easy visual corruption when moving windows, and
failing to do the second could have corrupted the ring with DRI2.
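
A minimal sketch of how the two locks pair with the data they protect; the
helper below is illustrative, not code from this patch:

/* Clip lists are only stable while the DRM hardware lock (and
 * dev->drw_lock) are held; ring emission is serialized by
 * dev->struct_mutex, the "ring mutex" referred to above.
 */
static void example_emit_swap_blits(struct drm_device *dev, drm_drawable_t id)
{
	struct drm_drawable_info *drw;
	unsigned long irqflags;

	/* Caller is assumed to hold the DRM hardware lock already. */
	mutex_lock(&dev->struct_mutex);		/* ring mutex for BEGIN_LP_RING */
	spin_lock_irqsave(&dev->drw_lock, irqflags);

	drw = drm_get_drawable_info(dev, id);
	if (drw && drw->num_rects > 0) {
		/* ... walk drw->rects[] and emit XY_SRC_COPY_BLT blits ... */
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
	mutex_unlock(&dev->struct_mutex);
}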

Grabbing the DRM lock involves using the DRM tasklet mechanism, and grabbing
the ring mutex means potentially sleeping. Deal with both by always running
the tasklet from a work handler.
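
A condensed sketch of the resulting flow, assuming the work item is set up
with INIT_WORK() as in i915_driver_irq_postinstall() below; the example_
functions are hypothetical stand-ins for the real handlers in the diff:

/* The interrupt handler must not sleep, so it only queues work. */
static void example_handle_vblank_irq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->swaps_pending > 0)
		schedule_work(&dev_priv->vblank_work);	/* defer to process context */
}

/* The work handler runs in process context, where blocking on the DRM
 * lock and on dev->struct_mutex is allowed.
 */
static void example_vblank_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv =
		container_of(work, drm_i915_private_t, vblank_work);
	struct drm_device *dev = dev_priv->dev;

	if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
		return;	/* simplified: the real handler registers
			 * dev->locked_tasklet_func so the current lock
			 * holder runs the tasklet at unlock time */

	i915_vblank_tasklet(dev);	/* takes dev->struct_mutex itself */

	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}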

Also, protect against clip list changes that may have happened since the
vblank request was queued by making sure the window has at least one
rectangle before looking inside, preventing oopses.
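
For illustration, the guard amounts to the following (variable names as in
the tasklet below):

/* The drawable can lose all of its rectangles, or be destroyed outright,
 * between queueing the swap and the vblank firing, so check num_rects
 * before dereferencing rects[0].
 */
drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
if (drw && drw->num_rects > 0) {
	unsigned short top = drw->rects[0].y1;	/* safe: at least one rect */
	/* ... order and clip against 'top' as the blit loop does ... */
}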

Signed-off-by: Keith Packard <keithp@keithp.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Keith Packard authored and Dave Airlie committed Oct 23, 2008
1 parent fe8133d commit 9e44af7
Showing 4 changed files with 92 additions and 60 deletions.
2 changes: 2 additions & 0 deletions drivers/gpu/drm/drm_lock.c
@@ -232,6 +232,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
return 0;
}
EXPORT_SYMBOL(drm_lock_take);

/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -299,6 +300,7 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
EXPORT_SYMBOL(drm_lock_free);

/**
* If we get here, it means that the process has called DRM_IOCTL_LOCK
10 changes: 5 additions & 5 deletions drivers/gpu/drm/i915/i915_drv.h
@@ -90,7 +90,7 @@ struct mem_block {
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
unsigned int plane;
unsigned int pipe;
unsigned int sequence;
} drm_i915_vbl_swap_t;

@@ -240,6 +240,9 @@ typedef struct drm_i915_private {
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];

/** Work task for vblank-related ring access */
struct work_struct vblank_work;

struct {
struct drm_mm gtt_space;

@@ -285,9 +288,6 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;

/** Work task for vblank-related ring access */
struct work_struct vblank_work;

uint32_t next_gem_seqno;

/**
@@ -441,7 +441,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);

extern void i915_gem_vblank_work_handler(struct work_struct *work);
extern void i915_vblank_work_handler(struct work_struct *work);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
2 changes: 0 additions & 2 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -2550,8 +2550,6 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
INIT_WORK(&dev_priv->mm.vblank_work,
i915_gem_vblank_work_handler);
dev_priv->mm.next_gem_seqno = 1;

i915_gem_detect_bit_6_swizzle(dev);
138 changes: 85 additions & 53 deletions drivers/gpu/drm/i915/i915_irq.c
@@ -121,6 +121,9 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
* Emit blits for scheduled buffer swaps.
*
* This function will be called with the HW lock held.
* Because this function must grab the ring mutex (dev->struct_mutex),
* it can no longer run at soft irq time. We'll fix this when we do
* the DRI2 swap buffer work.
*/
static void i915_vblank_tasklet(struct drm_device *dev)
{
@@ -141,6 +144,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
RING_LOCALS;

mutex_lock(&dev->struct_mutex);

if (IS_I965G(dev) && sarea_priv->front_tiled) {
cmd |= XY_SRC_COPY_BLT_DST_TILED;
dst_pitch >>= 2;
@@ -150,8 +155,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
src_pitch >>= 2;
}

counter[0] = drm_vblank_count(dev, 0);
counter[1] = drm_vblank_count(dev, 1);
counter[0] = drm_vblank_count(dev, i915_get_plane(dev, 0));
counter[1] = drm_vblank_count(dev, i915_get_plane(dev, 1));

DRM_DEBUG("\n");

@@ -165,7 +170,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
int pipe = i915_get_pipe(dev, vbl_swap->plane);
int pipe = vbl_swap->pipe;

if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
@@ -179,20 +184,19 @@ static void i915_vblank_tasklet(struct drm_device *dev)

drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

if (!drw) {
spin_unlock(&dev->drw_lock);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
spin_lock(&dev_priv->swaps_lock);
continue;
}

list_for_each(hit, &hits) {
drm_i915_vbl_swap_t *swap_cmp =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_drawable_info *drw_cmp =
drm_get_drawable_info(dev, swap_cmp->drw_id);

if (drw_cmp &&
/* Make sure both drawables are still
* around and have some rectangles before
* we look inside to order them for the
* blts below.
*/
if (drw_cmp && drw_cmp->num_rects > 0 &&
drw && drw->num_rects > 0 &&
drw_cmp->rects[0].y1 > drw->rects[0].y1) {
list_add_tail(list, hit);
break;
@@ -212,6 +216,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)

if (nhits == 0) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
mutex_unlock(&dev->struct_mutex);
return;
}

@@ -265,18 +270,21 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_clip_rect *rect;
int num_rects, plane;
int num_rects, pipe;
unsigned short top, bottom;

drw = drm_get_drawable_info(dev, swap_hit->drw_id);

/* The drawable may have been destroyed since
* the vblank swap was queued
*/
if (!drw)
continue;

rect = drw->rects;
plane = swap_hit->plane;
top = upper[plane];
bottom = lower[plane];
pipe = swap_hit->pipe;
top = upper[pipe];
bottom = lower[pipe];

for (num_rects = drw->num_rects; num_rects--; rect++) {
int y1 = max(rect->y1, top);
Expand All @@ -302,6 +310,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}

spin_unlock_irqrestore(&dev->drw_lock, irqflags);
mutex_unlock(&dev->struct_mutex);

list_for_each_safe(hit, tmp, &hits) {
drm_i915_vbl_swap_t *swap_hit =
@@ -350,18 +359,37 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
}

void
i915_gem_vblank_work_handler(struct work_struct *work)
i915_vblank_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
vblank_work);
struct drm_device *dev = dev_priv->dev;
unsigned long irqflags;

if (dev->lock.hw_lock == NULL) {
i915_vblank_tasklet(dev);
return;
}

dev_priv = container_of(work, drm_i915_private_t,
mm.vblank_work);
dev = dev_priv->dev;
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
dev->locked_tasklet_func = i915_vblank_tasklet;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

/* Try to get the lock now, if this fails, the lock
* holder will execute the tasklet during unlock
*/
if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
return;

dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

spin_lock_irqsave(&dev->tasklet_lock, irqflags);
dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

mutex_lock(&dev->struct_mutex);
i915_vblank_tasklet(dev);
mutex_unlock(&dev->struct_mutex);
drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -441,12 +469,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);

if (vblank && dev_priv->swaps_pending > 0) {
if (dev_priv->ring.ring_obj == NULL)
drm_locked_tasklet(dev, i915_vblank_tasklet);
else
schedule_work(&dev_priv->mm.vblank_work);
}
if (vblank && dev_priv->swaps_pending > 0)
schedule_work(&dev_priv->vblank_work);

return IRQ_HANDLED;
}
@@ -706,7 +730,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t *swap = data;
drm_i915_vbl_swap_t *vbl_swap;
drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
unsigned int pipe, seqtype, curseq, plane;
unsigned long irqflags;
struct list_head *list;
@@ -770,45 +794,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
}
}

vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n");
drm_vblank_put(dev, pipe);
return -ENOMEM;
}

vbl_swap->drw_id = swap->drawable;
vbl_swap->pipe = pipe;
vbl_swap->sequence = swap->sequence;

spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

list_for_each(list, &dev_priv->vbl_swaps.head) {
vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);

if (vbl_swap->drw_id == swap->drawable &&
vbl_swap->plane == plane &&
vbl_swap->sequence == swap->sequence) {
if (vbl_old->drw_id == swap->drawable &&
vbl_old->pipe == pipe &&
vbl_old->sequence == swap->sequence) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
drm_vblank_put(dev, pipe);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
DRM_DEBUG("Already scheduled\n");
return 0;
}
}

spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

if (dev_priv->swaps_pending >= 100) {
if (dev_priv->swaps_pending >= 10) {
DRM_DEBUG("Too many swaps queued\n");
DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
drm_vblank_count(dev, i915_get_plane(dev, 0)),
drm_vblank_count(dev, i915_get_plane(dev, 1)));

list_for_each(list, &dev_priv->vbl_swaps.head) {
vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
vbl_old->drw_id, vbl_old->pipe,
vbl_old->sequence);
}
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
drm_vblank_put(dev, pipe);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
return -EBUSY;
}

vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n");
drm_vblank_put(dev, pipe);
return -ENOMEM;
}

DRM_DEBUG("\n");

vbl_swap->drw_id = swap->drawable;
vbl_swap->plane = plane;
vbl_swap->sequence = swap->sequence;

spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;

Expand All @@ -835,6 +866,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)

spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
dev_priv->swaps_pending = 0;

/* Set initial unmasked IRQs to just the selected vblank pipes. */
