virtio-gpu: add & use virtio_gpu_queue_fenced_ctrl_buffer
Add helper function to handle the submission of fenced control requests.
Make sure we initialize the fence while holding the virtqueue lock, so
requests can't be reordered.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Gerd Hoffmann committed Oct 16, 2015
1 parent 9c73f47 commit ec2f057
Showing 2 changed files with 35 additions and 7 deletions.
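
Before the diff, here is a minimal stand-alone C sketch of the ordering rule the commit message describes (illustration only, not driver code; queue_lock, submit_fenced, next_fence_id and the fixed-size ring are invented for this example). The point it models: the fence sequence number must be assigned inside the same critical section that places the request on the queue, otherwise two concurrent submitters could obtain sequence numbers in one order and hit the ring in the other.

/*
 * Stand-alone model (not kernel code) of the rule this commit enforces:
 * assign the fence sequence number and enqueue the request under the
 * same lock, so sequence numbers always match submission order.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t next_fence_id = 1;    /* models the driver's fence sequence counter */
static uint64_t ring[RING_SIZE];      /* models the ctrl virtqueue */
static unsigned int ring_head;

/* Correct pattern: id assignment and enqueue in one critical section. */
static uint64_t submit_fenced(void)
{
        uint64_t id;

        pthread_mutex_lock(&queue_lock);
        id = next_fence_id++;              /* "emit" the fence */
        ring[ring_head % RING_SIZE] = id;  /* queue the request */
        ring_head++;
        pthread_mutex_unlock(&queue_lock);
        return id;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("submitted fence %llu\n",
                       (unsigned long long)submit_fenced());
        return 0;
}

In the driver, virtio_gpu_queue_fenced_ctrl_buffer plays the role of submit_fenced: it calls virtio_gpu_fence_emit() and then virtio_gpu_queue_ctrl_buffer_locked() back to back while ctrlq.qlock is held, which is also why the fence allocation in virtio_gpu_fence_emit() switches from GFP_KERNEL to GFP_ATOMIC below.
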
drivers/gpu/drm/virtio/virtgpu_fence.c (1 addition, 1 deletion)

@@ -81,7 +81,7 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
         struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
         unsigned long irq_flags;
 
-        *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL);
+        *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
         if ((*fence) == NULL)
                 return -ENOMEM;
drivers/gpu/drm/virtio/virtgpu_vq.c (34 additions, 6 deletions)

@@ -347,6 +347,38 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
         return rc;
 }
 
+static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+                                               struct virtio_gpu_vbuffer *vbuf,
+                                               struct virtio_gpu_ctrl_hdr *hdr,
+                                               struct virtio_gpu_fence **fence)
+{
+        struct virtqueue *vq = vgdev->ctrlq.vq;
+        int rc;
+
+again:
+        spin_lock(&vgdev->ctrlq.qlock);
+
+        /*
+         * Make sure we have enough space in the virtqueue.  If not,
+         * wait here until we have.
+         *
+         * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
+         * to wait for free space, which can result in fence ids being
+         * submitted out-of-order.
+         */
+        if (vq->num_free < 3) {
+                spin_unlock(&vgdev->ctrlq.qlock);
+                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+                goto again;
+        }
+
+        if (fence)
+                virtio_gpu_fence_emit(vgdev, hdr, fence);
+        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+        spin_unlock(&vgdev->ctrlq.qlock);
+        return rc;
+}
+
 static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
 {
@@ -499,9 +531,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
         cmd_p->r.x = x;
         cmd_p->r.y = y;
 
-        if (fence)
-                virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
-        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 static void
@@ -524,9 +554,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
         vbuf->data_buf = ents;
         vbuf->data_size = sizeof(*ents) * nents;
 
-        if (fence)
-                virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
-        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
