Commit b1eac30

Merge branch 'enetc-bug-fixes-for-bpf_xdp_adjust_head-and-bpf_xdp_adjust_tail'

Vladimir Oltean says:

====================
ENETC bug fixes for bpf_xdp_adjust_head() and bpf_xdp_adjust_tail()

It has been reported that on the ENETC driver, bpf_xdp_adjust_head()
and bpf_xdp_adjust_tail() are broken in combination with the XDP_PASS
verdict. I have constructed a series of simple XDP programs, tested them
with various packet sizes, and confirmed that this is the case.

Patch 3/3 fixes the core issue, which is that the sk_buff handed to the
stack on XDP_PASS is built by the driver as if XDP never ran, when in
fact its geometry needs to be adjusted according to the delta applied by
the program to the original xdp_buff. It depends on commit 539c1fb
("xdp: add generic xdp_build_skb_from_buff()") which is not available in
"stable" but perhaps should be.

Patch 2/3 is a small refactor necessary for 3/3.

Patch 1/3 fixes a related issue I noticed, which is that
bpf_xdp_adjust_tail() with a positive offset works for linear XDP
buffers, but returns an error for non-linear ones, even if there is
plenty of space in the final page fragment.
====================
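
For reference, below is a minimal sketch (not taken from the report or the
patch set) of the kind of XDP program the cover letter describes: it moves
the packet head with bpf_xdp_adjust_head() and hands the frame to the stack
with XDP_PASS, the combination that exposed the stale sk_buff geometry. The
program name, section name and 16-byte delta are illustrative assumptions.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_adjust_head_pass(struct xdp_md *ctx)
{
	/* A negative delta moves xdp->data 16 bytes into the headroom,
	 * growing the packet at the head (as one would to prepend an
	 * encapsulation header). On XDP_PASS, the sk_buff built by the
	 * driver must reflect this new geometry.
	 */
	if (bpf_xdp_adjust_head(ctx, -16))
		return XDP_ABORTED;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";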

Link: https://patch.msgid.link/20250417120005.3288549-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed Apr 22, 2025
2 parents cc3628d + 020f0c8, commit b1eac30

1 changed file, 28 additions and 17 deletions: drivers/net/ethernet/freescale/enetc/enetc.c
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1850,6 +1850,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
 	}
 }
 
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+				 int rx_ring_last)
+{
+	while (rx_ring_first != rx_ring_last) {
+		enetc_flip_rx_buff(rx_ring,
+				   &rx_ring->rx_swbd[rx_ring_first]);
+		enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+	}
+}
+
 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 				   struct napi_struct *napi, int work_limit,
 				   struct bpf_prog *prog)
@@ -1868,11 +1878,10 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 
 	while (likely(rx_frm_cnt < work_limit)) {
 		union enetc_rx_bd *rxbd, *orig_rxbd;
-		int orig_i, orig_cleaned_cnt;
 		struct xdp_buff xdp_buff;
 		struct sk_buff *skb;
+		int orig_i, err;
 		u32 bd_status;
-		int err;
 
 		rxbd = enetc_rxbd(rx_ring, i);
 		bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1887,7 +1896,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 			break;
 
 		orig_rxbd = rxbd;
-		orig_cleaned_cnt = cleaned_cnt;
 		orig_i = i;
 
 		enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
@@ -1915,15 +1923,21 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 			rx_ring->stats.xdp_drops++;
 			break;
 		case XDP_PASS:
-			rxbd = orig_rxbd;
-			cleaned_cnt = orig_cleaned_cnt;
-			i = orig_i;
-
-			skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
-					      &i, &cleaned_cnt,
-					      ENETC_RXB_DMA_SIZE_XDP);
-			if (unlikely(!skb))
+			skb = xdp_build_skb_from_buff(&xdp_buff);
+			/* Probably under memory pressure, stop NAPI */
+			if (unlikely(!skb)) {
+				enetc_xdp_drop(rx_ring, orig_i, i);
+				rx_ring->stats.xdp_drops++;
 				goto out;
+			}
+
+			enetc_get_offloads(rx_ring, orig_rxbd, skb);
+
+			/* These buffers are about to be owned by the stack.
+			 * Update our buffer cache (the rx_swbd array elements)
+			 * with their other page halves.
+			 */
+			enetc_bulk_flip_buff(rx_ring, orig_i, i);
 
 			napi_gro_receive(napi, skb);
 			break;
@@ -1965,11 +1979,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
 				enetc_xdp_drop(rx_ring, orig_i, i);
 				rx_ring->stats.xdp_redirect_failures++;
 			} else {
-				while (orig_i != i) {
-					enetc_flip_rx_buff(rx_ring,
-							   &rx_ring->rx_swbd[orig_i]);
-					enetc_bdr_idx_inc(rx_ring, &orig_i);
-				}
+				enetc_bulk_flip_buff(rx_ring, orig_i, i);
 				xdp_redirect_frm_cnt++;
 				rx_ring->stats.xdp_redirect++;
 			}
@@ -3362,7 +3372,8 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
 	bdr->buffer_offset = ENETC_RXB_PAD;
 	priv->rx_ring[i] = bdr;
 
-	err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+	err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+				 ENETC_RXB_DMA_SIZE_XDP);
 	if (err)
 		goto free_vector;
 
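
For context on patch 1/3: the generic tail-growth path for non-linear
buffers (bpf_xdp_frags_increase_tail() in net/core/filter.c) refuses to
grow into the last fragment unless the Rx queue was registered with a
non-zero frag_size, which is what the final hunk's switch to
__xdp_rxq_info_reg() with ENETC_RXB_DMA_SIZE_XDP provides. A minimal
sketch of a multi-buffer-aware program exercising that path (again not
from the patch set; the section name and 32-byte delta are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* The "xdp.frags" section name makes libbpf load the program with
 * BPF_F_XDP_HAS_FRAGS, declaring that it can handle non-linear buffers.
 */
SEC("xdp.frags")
int xdp_grow_tail_pass(struct xdp_md *ctx)
{
	/* A positive delta grows the packet at the tail; on a multi-buffer
	 * frame the bytes come from the tailroom of the final page
	 * fragment. Before patch 1/3, this returned an error on enetc even
	 * when that tailroom was available.
	 */
	if (bpf_xdp_adjust_tail(ctx, 32))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";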
