Skip to content

Commit

Permalink
net: mvneta: make tx buffer array agnostic
Browse files Browse the repository at this point in the history
Allow the tx buffer array to contain both skb and xdp buffers in order to
enable xdp frame recycling when adding XDP_TX verdict support

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Lorenzo Bianconi authored and David S. Miller committed Oct 21, 2019
1 parent fa383f6 commit 9e58c8b
Showing 1 changed file with 43 additions and 23 deletions.
66 changes: 43 additions & 23 deletions drivers/net/ethernet/marvell/mvneta.c
Original file line number Diff line number Diff line change
Expand Up @@ -561,6 +561,20 @@ struct mvneta_rx_desc {
};
#endif

/* Origin of a buffer tracked in the tx ring.  The completion path uses
 * this discriminator to know which member of the mvneta_tx_buf union is
 * valid and therefore how the buffer must be released.
 */
enum mvneta_tx_buf_type {
/* entry carries a socket buffer (buf->skb) */
MVNETA_TYPE_SKB,
/* entry carries an xdp_frame (buf->xdpf) from a local XDP_TX verdict —
 * NOTE(review): per commit intent; confirm against the completion path */
MVNETA_TYPE_XDP_TX,
/* entry carries an xdp_frame (buf->xdpf) submitted via ndo_xdp_xmit —
 * presumably; verify against callers */
MVNETA_TYPE_XDP_NDO,
};

/* Per-descriptor tx bookkeeping entry stored in mvneta_tx_queue->buf.
 * Replaces the old skb-only tx_skb array so a tx ring slot can hold
 * either an skb or an xdp_frame.
 */
struct mvneta_tx_buf {
/* selects which union member below is valid */
enum mvneta_tx_buf_type type;
union {
/* valid for MVNETA_TYPE_XDP_TX / MVNETA_TYPE_XDP_NDO */
struct xdp_frame *xdpf;
/* valid for MVNETA_TYPE_SKB; NULL on non-last descriptors */
struct sk_buff *skb;
};
};

struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
u8 id;
Expand All @@ -576,8 +590,8 @@ struct mvneta_tx_queue {
int tx_stop_threshold;
int tx_wake_threshold;

/* Array of transmitted skb */
struct sk_buff **tx_skb;
/* Array of transmitted buffers */
struct mvneta_tx_buf *buf;

/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
Expand Down Expand Up @@ -1780,24 +1794,22 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
int i;

for (i = 0; i < num; i++) {
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
txq->txq_get_index;
struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

if (skb) {
bytes_compl += skb->len;
pkts_compl++;
}

mvneta_txq_inc_get(txq);

if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
dma_unmap_single(pp->dev->dev.parent,
tx_desc->buf_phys_addr,
tx_desc->data_size, DMA_TO_DEVICE);
if (!skb)
if (!buf->skb)
continue;
dev_kfree_skb_any(skb);

bytes_compl += buf->skb->len;
pkts_compl++;
dev_kfree_skb_any(buf->skb);
}

netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
Expand Down Expand Up @@ -2324,16 +2336,19 @@ static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
struct mvneta_tx_desc *tx_desc;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;

txq->tx_skb[txq->txq_put_index] = NULL;
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
tx_desc->command = mvneta_skb_tx_csum(pp, skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
buf->type = MVNETA_TYPE_SKB;
buf->skb = NULL;

mvneta_txq_inc_put(txq);
}

Expand All @@ -2342,6 +2357,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
struct sk_buff *skb, char *data, int size,
bool last_tcp, bool is_last)
{
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;

tx_desc = mvneta_txq_next_desc_get(txq);
Expand All @@ -2355,15 +2371,16 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
}

tx_desc->command = 0;
txq->tx_skb[txq->txq_put_index] = NULL;
buf->type = MVNETA_TYPE_SKB;
buf->skb = NULL;

if (last_tcp) {
/* last descriptor in the TCP packet */
tx_desc->command = MVNETA_TXD_L_DESC;

/* last descriptor in SKB */
if (is_last)
txq->tx_skb[txq->txq_put_index] = skb;
buf->skb = skb;
}
mvneta_txq_inc_put(txq);
return 0;
Expand Down Expand Up @@ -2448,6 +2465,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
int i, nr_frags = skb_shinfo(skb)->nr_frags;

for (i = 0; i < nr_frags; i++) {
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = skb_frag_address(frag);

Expand All @@ -2467,12 +2485,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
txq->tx_skb[txq->txq_put_index] = skb;
buf->skb = skb;
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
txq->tx_skb[txq->txq_put_index] = NULL;
buf->skb = NULL;
}
buf->type = MVNETA_TYPE_SKB;
mvneta_txq_inc_put(txq);
}

Expand Down Expand Up @@ -2500,6 +2519,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
int len = skb->len;
int frags = 0;
Expand Down Expand Up @@ -2532,16 +2552,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
}

buf->type = MVNETA_TYPE_SKB;
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVNETA_TXD_FLZ_DESC;
tx_desc->command = tx_cmd;
txq->tx_skb[txq->txq_put_index] = skb;
buf->skb = skb;
mvneta_txq_inc_put(txq);
} else {
/* First but not Last */
tx_cmd |= MVNETA_TXD_F_DESC;
txq->tx_skb[txq->txq_put_index] = NULL;
buf->skb = NULL;
mvneta_txq_inc_put(txq);
tx_desc->command = tx_cmd;
/* Continue with other skb fragments */
Expand Down Expand Up @@ -3128,9 +3149,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,

txq->last_desc = txq->size - 1;

txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
GFP_KERNEL);
if (!txq->tx_skb) {
txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
if (!txq->buf) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
Expand All @@ -3142,7 +3162,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
if (!txq->tso_hdrs) {
kfree(txq->tx_skb);
kfree(txq->buf);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
Expand Down Expand Up @@ -3195,7 +3215,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

kfree(txq->tx_skb);
kfree(txq->buf);

if (txq->tso_hdrs)
dma_free_coherent(pp->dev->dev.parent,
Expand Down

0 comments on commit 9e58c8b

Please sign in to comment.