bnxt: refactor bnxt_rx_pages operate on skb_shared_info
Rather than operating on an sk_buff, add frags from the aggregation
ring into the frags of an skb_shared_info.  This will allow the
caller to use either an sk_buff or xdp_buff.

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Andy Gospodarek authored and David S. Miller committed Apr 8, 2022
1 parent ee536dc commit ca1df2d
Showing 1 changed file with 33 additions and 17 deletions.
drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1038,22 +1038,23 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 	return skb;
 }
 
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
-				     struct bnxt_cp_ring_info *cpr,
-				     struct sk_buff *skb, u16 idx,
-				     u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_pages(struct bnxt *bp,
+			   struct bnxt_cp_ring_info *cpr,
+			   struct skb_shared_info *shinfo,
+			   u16 idx, u32 agg_bufs, bool tpa)
 {
 	struct bnxt_napi *bnapi = cpr->bnapi;
 	struct pci_dev *pdev = bp->pdev;
 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 	u16 prod = rxr->rx_agg_prod;
+	u32 i, total_frag_len = 0;
 	bool p5_tpa = false;
-	u32 i;
 
 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
 		p5_tpa = true;
 
 	for (i = 0; i < agg_bufs; i++) {
+		skb_frag_t *frag = &shinfo->frags[i];
 		u16 cons, frag_len;
 		struct rx_agg_cmp *agg;
 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1069,8 +1070,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
 		cons_rx_buf = &rxr->rx_agg_ring[cons];
-		skb_fill_page_desc(skb, i, cons_rx_buf->page,
-				   cons_rx_buf->offset, frag_len);
+		skb_frag_off_set(frag, cons_rx_buf->offset);
+		skb_frag_size_set(frag, frag_len);
+		__skb_frag_set_page(frag, cons_rx_buf->page);
+		shinfo->nr_frags = i + 1;
 		__clear_bit(cons, rxr->rx_agg_bmap);
 
 		/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1082,36 +1085,49 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
 		cons_rx_buf->page = NULL;
 
 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
-			struct skb_shared_info *shinfo;
 			unsigned int nr_frags;
 
-			shinfo = skb_shinfo(skb);
 			nr_frags = --shinfo->nr_frags;
 			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
-			dev_kfree_skb(skb);
-
 			cons_rx_buf->page = page;
 
 			/* Update prod since possibly some pages have been
 			 * allocated already.
 			 */
 			rxr->rx_agg_prod = prod;
 			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
-			return NULL;
+			return 0;
 		}
 
 		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
 				     DMA_FROM_DEVICE,
 				     DMA_ATTR_WEAK_ORDERING);
 
-		skb->data_len += frag_len;
-		skb->len += frag_len;
-		skb->truesize += PAGE_SIZE;
-
+		total_frag_len += frag_len;
 		prod = NEXT_RX_AGG(prod);
 	}
 	rxr->rx_agg_prod = prod;
+	return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
+				     struct bnxt_cp_ring_info *cpr,
+				     struct sk_buff *skb, u16 idx,
+				     u32 agg_bufs, bool tpa)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	u32 total_frag_len = 0;
+
+	total_frag_len = __bnxt_rx_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
+
+	if (!total_frag_len) {
+		dev_kfree_skb(skb);
+		return NULL;
+	}
+
+	skb->data_len += total_frag_len;
+	skb->len += total_frag_len;
+	skb->truesize += PAGE_SIZE * agg_bufs;
 	return skb;
 }

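Note, not part of the commit: the message above says this change will let the caller use either an sk_buff or an xdp_buff. Below is a minimal sketch of how an XDP receive path might reuse the new __bnxt_rx_pages() helper; it assumes the in-kernel xdp_get_shared_info_from_buff() accessor from include/net/xdp.h, and the wrapper name is hypothetical.

/* Hypothetical illustration only.  An xdp_buff keeps its skb_shared_info
 * in the buffer tailroom, so the same frag-fill loop can run without an
 * sk_buff ever being allocated.
 */
static u32 bnxt_rx_pages_xdp_sketch(struct bnxt *bp,
				    struct bnxt_cp_ring_info *cpr,
				    struct xdp_buff *xdp, u16 idx,
				    u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);

	/* Same contract as the sk_buff wrapper above: total aggregation
	 * length on success, 0 if a replacement page could not be allocated.
	 */
	return __bnxt_rx_pages(bp, cpr, shinfo, idx, agg_bufs, tpa);
}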