Merge branch 'virtio-net-link-queues-to-napis'
Joe Damato says:

====================
virtio-net: Link queues to NAPIs

Jakub recently commented [1] that I should not hold this series on
virtio-net linking queues to NAPIs behind other important, ongoing work,
and suggested I re-spin, so here we are :)

As per the discussion on the v3 [2], now both RX and TX NAPIs use the
API to link queues to NAPIs. Since TX-only NAPIs don't have a NAPI ID,
commit 6597e8d ("netdev-genl: Elide napi_id when not present") now
correctly elides the TX-only NAPIs (instead of printing zero) when the
queues and NAPIs are linked.

As per the discussion on the v4 [3], patch 3 has been refactored to hold
RTNL only in the specific locations which need it as Jason requested.

As per the discussion on the v5 [4], patch 3 now leaves refill_work
as-is and does not use the API to unlink and relink queues and NAPIs. A
comment has been left as suggested by Jakub [5] for future work.

See the commit message of patch 3 for an example of how to get the NAPI
to queue mapping information.
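
For a rough illustration of what that looks like, here is a sketch using
the in-tree YNL Python library (the spec path, ifindex, and output
handling are illustrative assumptions, not the literal transcript from
patch 3):

  # Run from tools/net/ynl so that "lib" resolves to the YNL library.
  from lib import YnlFamily

  # Dump the queue<->NAPI links over the netdev netlink family.
  ynl = YnlFamily("Documentation/netlink/specs/netdev.yaml")
  for q in ynl.dump("queue-get", {"ifindex": 2}):
      # TX-only NAPIs have no NAPI ID, so 'napi-id' is elided on
      # 'type': 'tx' rows (commit 6597e8d); RX rows carry one.
      print(q["id"], q["type"], q.get("napi-id"))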

See the commit message of patch 4 for an example of how NAPI IDs are
persistent despite queue count changes.
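
To see that persistence directly, one could snapshot the RX NAPI IDs,
resize the queues, and compare (a sketch only; the device name, ifindex,
and queue counts below are illustrative assumptions, not the transcript
from patch 4):

  # NAPI IDs survive the resize because the driver now registers its
  # RX NAPIs with netif_napi_add_config(), which keeps per-queue-index
  # NAPI config (including the NAPI ID) across NAPI re-registration.
  import os
  from lib import YnlFamily

  ynl = YnlFamily("Documentation/netlink/specs/netdev.yaml")

  def rx_napi_ids():
      return {q["id"]: q.get("napi-id")
              for q in ynl.dump("queue-get", {"ifindex": 2})
              if q["type"] == "rx"}

  before = rx_napi_ids()
  os.system("ethtool -L eth0 combined 1 && ethtool -L eth0 combined 2")
  after = rx_napi_ids()
  assert before == after, "NAPI IDs changed across the resize"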

[1]: https://lore.kernel.org/20250221142650.3c74dcac@kernel.org
[2]: https://lore.kernel.org/20250127142400.24eca319@kernel.org
[3]: https://lore.kernel.org/CACGkMEv=ejJnOWDnAu7eULLvrqXjkMkTL4cbi-uCTUhCpKN_GA@mail.gmail.com
[4]: https://lore.kernel.org/Z8X15hxz8t-vXpPU@LQ3V64L9R2
[5]: https://lore.kernel.org/20250303160355.5f8d82d8@kernel.org

v5: https://lore.kernel.org/20250227185017.206785-1-jdamato@fastly.com
v4: https://lore.kernel.org/20250225020455.212895-1-jdamato@fastly.com
rfcv3: https://lore.kernel.org/20250121191047.269844-1-jdamato@fastly.com
v2: https://lore.kernel.org/20250116055302.14308-1-jdamato@fastly.com
v1: https://lore.kernel.org/20250110202605.429475-1-jdamato@fastly.com
====================

Link: https://patch.msgid.link/20250307011215.266806-1-jdamato@fastly.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed Mar 10, 2025
2 parents 8ef890d + d5d7152 commit 48c57a4
Showing 1 changed file with 74 additions and 27 deletions.

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
@@ -2807,7 +2807,8 @@ static void skb_recv_done(struct virtqueue *rvq)
 	virtqueue_napi_schedule(&rq->napi, rvq);
 }
 
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+				   struct napi_struct *napi)
 {
 	napi_enable(napi);
 
@@ -2820,10 +2821,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
 	local_bh_enable();
 }
 
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
-				   struct virtqueue *vq,
-				   struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
 {
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	int qidx = vq2rxq(rq->vq);
+
+	virtnet_napi_do_enable(rq->vq, &rq->napi);
+	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
+{
+	struct virtnet_info *vi = sq->vq->vdev->priv;
+	struct napi_struct *napi = &sq->napi;
+	int qidx = vq2txq(sq->vq);
+
 	if (!napi->weight)
 		return;
 
@@ -2835,13 +2847,30 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
 		return;
 	}
 
-	return virtnet_napi_enable(vq, napi);
+	virtnet_napi_do_enable(sq->vq, napi);
+	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
 }
 
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
 {
-	if (napi->weight)
+	struct virtnet_info *vi = sq->vq->vdev->priv;
+	struct napi_struct *napi = &sq->napi;
+	int qidx = vq2txq(sq->vq);
+
+	if (napi->weight) {
+		netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
 		napi_disable(napi);
+	}
+}
+
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	struct napi_struct *napi = &rq->napi;
+	int qidx = vq2rxq(rq->vq);
+
+	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
+	napi_disable(napi);
 }
 
 static void refill_work(struct work_struct *work)
@@ -2854,9 +2883,23 @@ static void refill_work(struct work_struct *work)
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
+		/*
+		 * When queue API support is added in the future and the call
+		 * below becomes napi_disable_locked, this driver will need to
+		 * be refactored.
+		 *
+		 * One possible solution would be to:
+		 *   - cancel refill_work with cancel_delayed_work (note:
+		 *     non-sync)
+		 *   - cancel refill_work with cancel_delayed_work_sync in
+		 *     virtnet_remove after the netdev is unregistered
+		 *   - wrap all of the work in a lock (perhaps the netdev
+		 *     instance lock)
+		 *   - check netif_running() and return early to avoid a race
+		 */
 		napi_disable(&rq->napi);
 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
-		virtnet_napi_enable(rq->vq, &rq->napi);
+		virtnet_napi_do_enable(rq->vq, &rq->napi);
 
 		/* In theory, this can happen: if we don't get any buffers in
 		 * we will *never* try to fill again.
@@ -3053,8 +3096,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
 {
-	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
-	napi_disable(&vi->rq[qp_index].napi);
+	virtnet_napi_tx_disable(&vi->sq[qp_index]);
+	virtnet_napi_disable(&vi->rq[qp_index]);
 	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
 }
 
@@ -3073,8 +3116,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
 	if (err < 0)
 		goto err_xdp_reg_mem_model;
 
-	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
-	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+	virtnet_napi_enable(&vi->rq[qp_index]);
+	virtnet_napi_tx_enable(&vi->sq[qp_index]);
 
 	return 0;
 
@@ -3326,7 +3369,7 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
 	bool running = netif_running(vi->dev);
 
 	if (running) {
-		napi_disable(&rq->napi);
+		virtnet_napi_disable(rq);
 		virtnet_cancel_dim(vi, &rq->dim);
 	}
 }
@@ -3339,7 +3382,7 @@ static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
 		schedule_delayed_work(&vi->refill, 0);
 
 	if (running)
-		virtnet_napi_enable(rq->vq, &rq->napi);
+		virtnet_napi_enable(rq);
 }
 
 static int virtnet_rx_resize(struct virtnet_info *vi,
@@ -3368,7 +3411,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
 	qindex = sq - vi->sq;
 
 	if (running)
-		virtnet_napi_tx_disable(&sq->napi);
+		virtnet_napi_tx_disable(sq);
 
 	txq = netdev_get_tx_queue(vi->dev, qindex);
 
@@ -3402,7 +3445,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
 	__netif_tx_unlock_bh(txq);
 
 	if (running)
-		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+		virtnet_napi_tx_enable(sq);
 }
 
 static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -5634,8 +5677,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 	netif_tx_lock_bh(vi->dev);
 	netif_device_detach(vi->dev);
 	netif_tx_unlock_bh(vi->dev);
-	if (netif_running(vi->dev))
+	if (netif_running(vi->dev)) {
+		rtnl_lock();
 		virtnet_close(vi->dev);
+		rtnl_unlock();
+	}
 }
 
 static int init_vqs(struct virtnet_info *vi);
@@ -5655,7 +5701,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 	enable_rx_mode_work(vi);
 
 	if (netif_running(vi->dev)) {
+		rtnl_lock();
 		err = virtnet_open(vi->dev);
+		rtnl_unlock();
 		if (err)
 			return err;
 	}
@@ -5945,8 +5993,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
 	if (netif_running(dev)) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
-			napi_disable(&vi->rq[i].napi);
-			virtnet_napi_tx_disable(&vi->sq[i].napi);
+			virtnet_napi_disable(&vi->rq[i]);
+			virtnet_napi_tx_disable(&vi->sq[i]);
 		}
 	}
 
@@ -5983,9 +6031,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		if (old_prog)
 			bpf_prog_put(old_prog);
 		if (netif_running(dev)) {
-			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
-					       &vi->sq[i].napi);
+			virtnet_napi_enable(&vi->rq[i]);
+			virtnet_napi_tx_enable(&vi->sq[i]);
 		}
 	}
 
@@ -6000,9 +6047,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 
 	if (netif_running(dev)) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
-			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
-					       &vi->sq[i].napi);
+			virtnet_napi_enable(&vi->rq[i]);
+			virtnet_napi_tx_enable(&vi->sq[i]);
 		}
 	}
 	if (prog)
@@ -6409,8 +6455,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		vi->rq[i].pages = NULL;
-		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
-				      napi_weight);
+		netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
+				      i);
+		vi->rq[i].napi.weight = napi_weight;
 		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
 					 virtnet_poll_tx,
 					 napi_tx ? napi_weight : 0);
