xsk: i40e: ice: ixgbe: mlx5: Rename xsk zero-copy driver interfaces
Rename the AF_XDP zero-copy driver interface functions to better
reflect what they do after the replacement of umems with buffer
pools in the previous commit. Mostly this means dropping the umem
part of the function names in favor of xsk_buff and having the
functions take a buffer pool pointer instead of a umem. The
various ring functions have also been renamed in the process so
that they follow the same naming convention as the internal
functions in xsk_queue.h, both to make it clearer what they do
and for consistency.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-3-git-send-email-magnus.karlsson@intel.com
Magnus Karlsson authored and Daniel Borkmann committed Aug 31, 2020
1 parent 1742b3d commit c465576
Showing 19 changed files with 179 additions and 167 deletions.
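
Before the per-file diffs, a condensed illustration of the renamed interface may help. The following is a minimal sketch of a zero-copy transmit path written against the new names, modeled loosely on the i40e and ixgbe changes below. Only the xsk_* calls come from this commit; struct my_ring, my_xmit_zc() and the elided hardware descriptor handling are hypothetical scaffolding, and the header name is the driver-facing AF_XDP header the affected drivers already use.

/* A minimal, hypothetical transmit loop written against the renamed
 * buffer-pool interface. The xsk_* calls are the new names introduced by
 * this commit; everything else is illustrative scaffolding, not driver code.
 */
#include <net/xdp_sock_drv.h>	/* driver-facing AF_XDP API */

struct my_ring {
	struct xsk_buff_pool *xsk_pool;	/* replaces the old xdp_umem pointer */
	/* ... driver-specific ring state ... */
};

static bool my_xmit_zc(struct my_ring *ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = ring->xsk_pool;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (budget > 0) {
		/* was xsk_umem_consume_tx(pool->umem, &desc) */
		if (!xsk_tx_peek_desc(pool, &desc))
			break;
		budget--;

		/* both helpers now take the pool rather than pool->umem */
		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		/* ... fill and submit a hardware Tx descriptor for dma/desc.len ... */
	}

	/* was xsk_umem_consume_tx_done(pool->umem) */
	xsk_tx_release(pool);

	/* the need_wakeup helpers also operate on the pool now */
	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	/* true means all pending descriptors fit within the budget */
	return budget > 0;
}

The Rx side follows the same pattern in the diffs below: xsk_buff_alloc(), xsk_pool_get_rx_frame_size(), xsk_pool_set_rxq_info() and the Rx need_wakeup helpers all take the buffer pool directly instead of reaching through ->umem.
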
drivers/net/ethernet/intel/i40e/i40e_main.c (3 additions, 3 deletions)
@@ -3138,7 +3138,7 @@ static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
return NULL;

- return xdp_get_xsk_pool_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}

/**
@@ -3286,7 +3286,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ret)
return ret;
ring->rx_buf_len =
- xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -3370,7 +3370,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
writel(0, ring->tail);

if (ring->xsk_pool) {
- xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
} else {
ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
drivers/net/ethernet/intel/i40e/i40e_xsk.c (16 additions, 18 deletions)
@@ -55,8 +55,7 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
qid >= netdev->real_num_tx_queues)
return -EINVAL;

- err = xsk_buff_dma_map(pool->umem, &vsi->back->pdev->dev,
- I40E_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;

@@ -97,7 +96,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
bool if_running;
int err;

- pool = xdp_get_xsk_pool_from_qid(netdev, qid);
+ pool = xsk_get_pool_from_qid(netdev, qid);
if (!pool)
return -EINVAL;

@@ -110,7 +109,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
}

clear_bit(qid, vsi->af_xdp_zc_qps);
- xsk_buff_dma_unmap(pool->umem, I40E_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -196,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = i40e_rx_bi(rx_ring, ntu);
do {
- xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+ xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp) {
ok = false;
goto no_buffers;
@@ -363,11 +362,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

return (int)total_rx_packets;
}
@@ -390,12 +389,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;

while (budget-- > 0) {
- if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;

- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem,
- desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);

tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -422,7 +420,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
I40E_TXD_QW1_CMD_SHIFT);
i40e_xdp_ring_update_tail(xdp_ring);

- xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
}

@@ -494,13 +492,13 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
tx_ring->next_to_clean -= tx_ring->count;

if (xsk_frames)
- xsk_umem_complete_tx(bp->umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);

i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_pool->umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_pool->umem);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
@@ -591,7 +589,7 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
}

if (xsk_frames)
- xsk_umem_complete_tx(bp->umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
}

/**
@@ -607,7 +605,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
int i;

for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (xdp_get_xsk_pool_from_qid(netdev, i))
+ if (xsk_get_pool_from_qid(netdev, i))
return true;
}

drivers/net/ethernet/intel/ice/ice_base.c (3 additions, 3 deletions)
@@ -313,7 +313,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

ring->rx_buf_len =
- xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
NULL);
if (err)
return err;
- xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -418,7 +418,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
writel(0, ring->tail);

if (ring->xsk_pool) {
- if (!xsk_buff_can_alloc(ring->xsk_pool->umem, num_bufs)) {
+ if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index);
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
drivers/net/ethernet/intel/ice/ice_xsk.c (14 additions, 14 deletions)
@@ -311,7 +311,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
!vsi->xsk_pools[qid])
return -EINVAL;

- xsk_buff_dma_unmap(vsi->xsk_pools[qid]->umem, ICE_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
ice_xsk_remove_pool(vsi, qid);

return 0;
@@ -348,7 +348,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
vsi->xsk_pools[qid] = pool;
vsi->num_xsk_pools_used++;

- err = xsk_buff_dma_map(vsi->xsk_pools[qid]->umem, ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
rx_buf = &rx_ring->rx_buf[ntu];

do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) {
ret = true;
break;
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

return (int)total_rx_packets;
}
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)

tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

- if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;

- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);

tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)

if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
}

return budget > 0 && work_done;
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
xdp_ring->next_to_clean = ntc;

if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_pool->umem))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_pool->umem);
+ if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
}

if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (3 additions, 3 deletions)
@@ -3714,7 +3714,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,

/* configure the packet buffer length */
if (rx_ring->xsk_pool) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_pool->umem);
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);

/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4064,7 +4064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL));
- xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4120,7 +4120,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
}

if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);

rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c (16 additions, 16 deletions)
@@ -17,7 +17,7 @@ struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;

- return xdp_get_xsk_pool_from_qid(adapter->netdev, qid);
+ return xsk_get_pool_from_qid(adapter->netdev, qid);
}

static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
@@ -35,7 +35,7 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;

- err = xsk_buff_dma_map(pool->umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err)
return err;

@@ -64,7 +64,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
struct xsk_buff_pool *pool;
bool if_running;

- pool = xdp_get_xsk_pool_from_qid(adapter->netdev, qid);
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
if (!pool)
return -EINVAL;

@@ -75,7 +75,7 @@ static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid);

clear_bit(qid, adapter->af_xdp_zc_qps);
- xsk_buff_dma_unmap(pool->umem, IXGBE_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -150,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
i -= rx_ring->count;

do {
- bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+ bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!bi->xdp) {
ok = false;
break;
@@ -345,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;

- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

return (int)total_rx_packets;
}
@@ -389,11 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
break;
}

- if (!xsk_umem_consume_tx(pool->umem, &desc))
+ if (!xsk_tx_peek_desc(pool, &desc))
break;

- dma = xsk_buff_raw_get_dma(pool->umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(pool->umem, dma, desc.len);
+ dma = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
@@ -419,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)

if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(pool->umem);
+ xsk_tx_release(pool);
}

return !!budget && work_done;
@@ -485,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
q_vector->tx.total_packets += total_packets;

if (xsk_frames)
- xsk_umem_complete_tx(pool->umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);

- if (xsk_umem_uses_need_wakeup(pool->umem))
- xsk_set_tx_need_wakeup(pool->umem);
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);

return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
@@ -547,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
}

if (xsk_frames)
- xsk_umem_complete_tx(pool->umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
}
(Diffs for the remaining 13 changed files, including the mlx5 driver, are not shown here.)
