virtio_pci: support VIRTIO_F_RING_RESET
This patch implements virtio-pci support for queue reset.

Resetting a queue is divided into these steps:

 1. notify the device to reset the queue
 2. recycle the buffers already submitted
 3. reset the vring (may re-allocate it)
 4. map the vring to the device, and re-enable the queue

This patch implements virtio_reset_vq() and virtio_enable_resetq() for
the PCI transport, as sketched below.
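
To illustrate (not part of the patch): a minimal sketch of how a driver could drive the four steps above through the two new config ops. virtqueue_detach_unused_buf() is an existing core helper for draining a stopped queue; example_resize_ring() and the recycle callback are hypothetical stand-ins for step 3.

static int example_reset_and_resize_vq(struct virtqueue *vq, u32 num,
                                       void (*recycle)(struct virtqueue *vq,
                                                       void *buf))
{
        struct virtio_device *vdev = vq->vdev;
        void *buf;
        int err;

        /* step 1: notify the device to reset the queue */
        err = vdev->config->disable_vq_and_reset(vq);
        if (err)
                return err;

        /* step 2: recycle the buffers submitted but never consumed */
        while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
                recycle(vq, buf);

        /* step 3: reset the vring, re-allocating if num changed (hypothetical helper) */
        err = example_resize_ring(vq, num);
        if (err)
                return err;

        /* step 4: map the vring to the device and re-enable the queue */
        return vdev->config->enable_vq_after_reset(vq);
}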

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20220801063902.129329-33-xuanzhuo@linux.alibaba.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Xuan Zhuo authored and Michael S. Tsirkin committed Aug 11, 2022
1 parent 56bdc06 commit 04ca0b0
Showing 2 changed files with 97 additions and 3 deletions.
drivers/virtio/virtio_pci_common.c (12 changes: 9 additions & 3 deletions)
@@ -214,9 +214,15 @@ static void vp_del_vq(struct virtqueue *vq)
 	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
 	unsigned long flags;
 
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_del(&info->node);
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
+	/*
+	 * If the vq failed during re-enable after a reset, info->node was
+	 * never re-added to the virtqueues list, so don't delete it again.
+	 * This prevents unexpected irqs.
+	 */
+	if (!vq->reset) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_del(&info->node);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	}
 
 	vp_dev->del_vq(info);
 	kfree(info);
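
The !vq->reset guard exists because vp_modern_disable_vq_and_reset() (added below in virtio_pci_modern.c) already removes info->node from the list and re-initializes it; if re-enabling fails and the driver tears the device down, vp_del_vq() must not delete the node a second time. A hedged sketch of that error path, with buffer recycling and cleanup elided:

/* Sketch: teardown after a failed re-enable. vq->reset stays true, so the
 * vp_del_vq() reached from del_vqs() skips the second list_del().
 */
static void example_teardown_on_failed_reenable(struct virtio_device *vdev,
                                                struct virtqueue *vq)
{
        if (vdev->config->disable_vq_and_reset(vq))
                return;                         /* vq still live, nothing to undo */

        /* ... recycle buffers, resize the vring ... */

        if (vdev->config->enable_vq_after_reset(vq))
                vdev->config->del_vqs(vdev);    /* vq->reset is still true here */
}
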
drivers/virtio/virtio_pci_modern.c (88 changes: 88 additions & 0 deletions)
@@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
 	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
 	    pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
 		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
 }
 
 /* virtio config->finalize_features() implementation */
@@ -199,6 +202,87 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
 	return 0;
 }
 
+static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+	struct virtio_pci_vq_info *info;
+	unsigned long flags;
+
+	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+		return -ENOENT;
+
+	vp_modern_set_queue_reset(mdev, vq->index);
+
+	info = vp_dev->vqs[vq->index];
+
+	/* delete vq from irq handler */
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+	INIT_LIST_HEAD(&info->node);
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+	__virtqueue_break(vq);
+#endif
+
+	/* For the case where vq has an exclusive irq, call synchronize_irq() to
+	 * wait for completion.
+	 *
+	 * note: We can't use disable_irq() since it conflicts with the affinity
+	 * managed IRQ that is used by some drivers.
+	 */
+	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+	vq->reset = true;
+
+	return 0;
+}
+
+static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+	struct virtio_pci_vq_info *info;
+	unsigned long flags, index;
+	int err;
+
+	if (!vq->reset)
+		return -EBUSY;
+
+	index = vq->index;
+	info = vp_dev->vqs[index];
+
+	if (vp_modern_get_queue_reset(mdev, index))
+		return -EBUSY;
+
+	if (vp_modern_get_queue_enable(mdev, index))
+		return -EBUSY;
+
+	err = vp_active_vq(vq, info->msix_vector);
+	if (err)
+		return err;
+
+	if (vq->callback) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_add(&info->node, &vp_dev->virtqueues);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	} else {
+		INIT_LIST_HEAD(&info->node);
+	}
+
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+	__virtqueue_unbreak(vq);
+#endif
+
+	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+	vq->reset = false;
+
+	return 0;
+}
+
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 {
 	return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -413,6 +497,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 	.set_vq_affinity = vp_set_vq_affinity,
 	.get_vq_affinity = vp_get_vq_affinity,
 	.get_shm_region = vp_get_shm_region,
+	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -431,6 +517,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
 	.set_vq_affinity = vp_set_vq_affinity,
 	.get_vq_affinity = vp_get_vq_affinity,
 	.get_shm_region = vp_get_shm_region,
+	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
+	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
 };
 
 /* the PCI probing function */
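
Why the disable path only calls synchronize_irq() for an exclusive per-vq vector: with a shared vector, interrupts are dispatched by walking vp_dev->virtqueues under vp_dev->lock, so the list_del() in vp_modern_disable_vq_and_reset() is already enough to stop callbacks for this vq. A sketch of that shared-vector dispatch, closely modeled on the existing vp_vring_interrupt() in virtio_pci_common.c:

static irqreturn_t example_shared_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        /* A vq whose info->node was list_del()'ed is simply never visited,
         * which is what "delete vq from irq handler" above relies on.
         */
        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}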
