virtio_net: rx remove premapped failover code
Now, the premapped mode can be enabled unconditionally.

So we can remove the failover code for the mergeable and small buffer paths.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
Link: https://lore.kernel.org/r/20240511031404.30903-4-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Xuan Zhuo authored and Jakub Kicinski committed May 14, 2024
1 parent a377ae5 commit defd28a
Showing 1 changed file with 35 additions and 50 deletions.
85 changes: 35 additions & 50 deletions drivers/net/virtio_net.c
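For orientation, below is a minimal userspace sketch of the pattern this commit removes; it is not the driver code, and the toy_rq/fake_set_premapped names are hypothetical stand-ins. Previously each receive queue carried a do_dma flag and setup silently fell back when premapped mode could not be enabled; now setup asserts success and the receive paths take the premapped branch unconditionally.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	bool premapped;		/* stand-in for the removed rq->do_dma flag */
};

/* stand-in for virtqueue_set_dma_premapped(); returns 0 on success */
static int fake_set_premapped(struct toy_rq *rq)
{
	rq->premapped = true;
	return 0;
}

static void setup_old(struct toy_rq *rq)
{
	if (fake_set_premapped(rq))
		return;		/* failover: keep premapped disabled, use the plain path */
}

static void setup_new(struct toy_rq *rq)
{
	int err = fake_set_premapped(rq);

	/* mirrors the new BUG_ON(): enabling is expected to always succeed */
	assert(err == 0);
	(void)err;
}

int main(void)
{
	struct toy_rq rq = { 0 };

	setup_old(&rq);
	setup_new(&rq);
	printf("premapped = %d\n", rq.premapped);
	return 0;
}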
@@ -348,9 +348,6 @@ struct receive_queue {
 
 	/* Record the last dma info to free after new pages is allocated. */
 	struct virtnet_rq_dma *last_dma;
-
-	/* Do dma by self */
-	bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -850,7 +847,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 	void *buf;
 
 	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-	if (buf && rq->do_dma)
+	if (buf)
 		virtnet_rq_unmap(rq, buf, *len);
 
 	return buf;
@@ -863,11 +860,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 	u32 offset;
 	void *head;
 
-	if (!rq->do_dma) {
-		sg_init_one(rq->sg, buf, len);
-		return;
-	}
-
 	head = page_address(rq->alloc_frag.page);
 
 	offset = buf - head;
@@ -893,44 +885,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
 	head = page_address(alloc_frag->page);
 
-	if (rq->do_dma) {
-		dma = head;
+	dma = head;
 
-		/* new pages */
-		if (!alloc_frag->offset) {
-			if (rq->last_dma) {
-				/* Now, the new page is allocated, the last dma
-				 * will not be used. So the dma can be unmapped
-				 * if the ref is 0.
-				 */
-				virtnet_rq_unmap(rq, rq->last_dma, 0);
-				rq->last_dma = NULL;
-			}
+	/* new pages */
+	if (!alloc_frag->offset) {
+		if (rq->last_dma) {
+			/* Now, the new page is allocated, the last dma
+			 * will not be used. So the dma can be unmapped
+			 * if the ref is 0.
+			 */
+			virtnet_rq_unmap(rq, rq->last_dma, 0);
+			rq->last_dma = NULL;
+		}
 
-			dma->len = alloc_frag->size - sizeof(*dma);
+		dma->len = alloc_frag->size - sizeof(*dma);
 
-			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-							      dma->len, DMA_FROM_DEVICE, 0);
-			if (virtqueue_dma_mapping_error(rq->vq, addr))
-				return NULL;
+		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+						      dma->len, DMA_FROM_DEVICE, 0);
+		if (virtqueue_dma_mapping_error(rq->vq, addr))
+			return NULL;
 
-			dma->addr = addr;
-			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+		dma->addr = addr;
+		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-			/* Add a reference to dma to prevent the entire dma from
-			 * being released during error handling. This reference
-			 * will be freed after the pages are no longer used.
-			 */
-			get_page(alloc_frag->page);
-			dma->ref = 1;
-			alloc_frag->offset = sizeof(*dma);
+		/* Add a reference to dma to prevent the entire dma from
+		 * being released during error handling. This reference
+		 * will be freed after the pages are no longer used.
+		 */
+		get_page(alloc_frag->page);
+		dma->ref = 1;
+		alloc_frag->offset = sizeof(*dma);
 
-			rq->last_dma = dma;
-		}
+		rq->last_dma = dma;
+	}
 
-		++dma->ref;
-	}
+	++dma->ref;
 
 	buf = head + alloc_frag->offset;
 
 	get_page(alloc_frag->page);
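To make the per-page bookkeeping above easier to follow, here is a simplified userspace model of the mapping lifecycle that virtnet_rq_alloc() now relies on unconditionally; the toy_* names are hypothetical and this is not the driver code. The DMA metadata sits at the head of the page fragment, the page is mapped once when it is first used, every buffer carved out of it takes a reference, and the mapping is released only when the last reference drops (as virtnet_rq_unmap() does).

#include <stdio.h>
#include <stdlib.h>

struct toy_dma {
	int ref;	/* buffers still outstanding on this page */
	int mapped;	/* stand-in for the device-visible DMA mapping */
};

static struct toy_dma *toy_map_page(void)
{
	struct toy_dma *dma = calloc(1, sizeof(*dma));

	if (!dma)
		exit(1);
	dma->mapped = 1;	/* analogue of virtqueue_dma_map_single_attrs() */
	dma->ref = 1;		/* extra ref held until the page is retired */
	return dma;
}

static void toy_get_buf(struct toy_dma *dma)
{
	dma->ref++;		/* each rx buffer pins the page mapping */
}

static void toy_put_buf(struct toy_dma *dma)
{
	if (--dma->ref == 0) {	/* last user gone: unmap and release */
		dma->mapped = 0;
		free(dma);
	}
}

int main(void)
{
	struct toy_dma *dma = toy_map_page();

	toy_get_buf(dma);	/* first buffer carved from the page */
	toy_get_buf(dma);	/* second buffer carved from the page */
	toy_put_buf(dma);	/* device returned the first buffer */
	toy_put_buf(dma);	/* device returned the second buffer */
	toy_put_buf(dma);	/* page retired (rq->last_dma analogue) */
	printf("all references dropped, mapping released\n");
	return 0;
}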
@@ -947,12 +937,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 	if (!vi->mergeable_rx_bufs && vi->big_packets)
 		return;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
-			continue;
-
-		vi->rq[i].do_dma = true;
-	}
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		/* error should never happen */
+		BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
 }
 
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -2030,8 +2017,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -2145,8 +2131,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -5229,7 +5214,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 	int i;
 	for (i = 0; i < vi->max_queue_pairs; i++)
 		if (vi->rq[i].alloc_frag.page) {
-			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+			if (vi->rq[i].last_dma)
 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
 			put_page(vi->rq[i].alloc_frag.page);
 		}