diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 52b9833fda99a..8bcda2cb3a2e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -40,6 +40,7 @@
 #define AQ_CFG_RX_HDR_SIZE 256U
 
 #define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
 
 /* LRO */
 #define AQ_CFG_IS_LRO_DEF           1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a418238f63095..1daecd483b8d6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
 	"%sQueue[%d] AllocFails",
 	"%sQueue[%d] SkbAllocFails",
 	"%sQueue[%d] Polls",
+	"%sQueue[%d] PageFlips",
+	"%sQueue[%d] PageReuses",
+	"%sQueue[%d] PageFrees",
+	"%sQueue[%d] XdpAbort",
+	"%sQueue[%d] XdpDrop",
+	"%sQueue[%d] XdpPass",
+	"%sQueue[%d] XdpTx",
+	"%sQueue[%d] XdpInvalid",
+	"%sQueue[%d] XdpRedirect",
 };
 
 static const char * const aq_ethtool_queue_tx_stat_names[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e65ce7199dac9..88595863d8bc6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -14,17 +14,22 @@
 #include "aq_ptp.h"
 #include "aq_filters.h"
 #include "aq_hw_utils.h"
+#include "aq_vec.h"
 
 #include <linux/netdevice.h>
 #include <linux/module.h>
 #include <linux/ip.h>
 #include <linux/udp.h>
 #include <net/pkt_cls.h>
+#include <linux/filter.h>
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
 MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
 
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
 static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
 
 static const struct net_device_ops aq_ndev_ops;
@@ -126,9 +131,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
 
 static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
 {
+	int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct bpf_prog *prog;
 	int err;
 
+	prog = READ_ONCE(aq_nic->xdp_prog);
+	if (prog && !prog->aux->xdp_has_frags &&
+	    new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+		netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+			   new_mtu);
+		return -EOPNOTSUPP;
+	}
+
 	err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
 
 	if (err < 0)
@@ -204,6 +219,25 @@ static int aq_ndev_set_features(struct net_device *ndev,
 	return err;
 }
 
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+					      netdev_features_t features)
+{
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct bpf_prog *prog;
+
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	prog = READ_ONCE(aq_nic->xdp_prog);
+	if (prog && !prog->aux->xdp_has_frags &&
+	    features & NETIF_F_LRO) {
+		netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
+		features &= ~NETIF_F_LRO;
+	}
+
+	return features;
+}
+
 static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
 {
 	struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -410,6 +444,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 				  mqprio->qopt.prio_tc_map);
 }
 
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+			struct netlink_ext_ack *extack)
+{
+	bool need_update, running = netif_running(ndev);
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+	struct bpf_prog *old_prog;
+
+	if (prog && !prog->aux->xdp_has_frags) {
+		if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "prog does not support XDP frags");
+			return -EOPNOTSUPP;
+		}
+
+		if (ndev->features & NETIF_F_LRO) {
+			netdev_err(ndev,
+				   "LRO is not supported with single buffer XDP, disabling\n");
+			ndev->features &= ~NETIF_F_LRO;
+		}
+	}
+
+	need_update = !!aq_nic->xdp_prog != !!prog;
+	if (running && need_update)
+		aq_ndev_close(ndev);
+
+	old_prog = xchg(&aq_nic->xdp_prog, prog);
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (!old_prog && prog)
+		static_branch_inc(&aq_xdp_locking_key);
+	else if (old_prog && !prog)
+		static_branch_dec(&aq_xdp_locking_key);
+
+	if (running && need_update)
+		return aq_ndev_open(ndev);
+
+	return 0;
+}
+
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+	default:
+		return -EINVAL;
+	}
+}
+
 static const struct net_device_ops aq_ndev_ops = {
 	.ndo_open = aq_ndev_open,
 	.ndo_stop = aq_ndev_close,
@@ -418,10 +502,13 @@ static const struct net_device_ops aq_ndev_ops = {
 	.ndo_change_mtu = aq_ndev_change_mtu,
 	.ndo_set_mac_address = aq_ndev_set_mac_address,
 	.ndo_set_features = aq_ndev_set_features,
+	.ndo_fix_features = aq_ndev_fix_features,
 	.ndo_eth_ioctl = aq_ndev_ioctl,
 	.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
 	.ndo_setup_tc = aq_ndo_setup_tc,
+	.ndo_bpf = aq_xdp,
+	.ndo_xdp_xmit = aq_xdp_xmit,
 };
 
 static int __init aq_ndev_init_module(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce733..99870865f66db 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -12,6 +12,8 @@
 #include "aq_common.h"
 #include "aq_nic.h"
 
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
 void aq_ndev_schedule_work(struct work_struct *work);
 struct net_device *aq_ndev_alloc(void);
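The MTU guard in aq_ndev_change_mtu() above is easy to sanity-check outside the kernel: a single RX buffer holds AQ_CFG_RX_FRAME_MAX bytes, so the largest MTU a non-frags XDP program can accept is that limit minus the Ethernet header and FCS. A minimal userspace sketch, assuming AQ_CFG_RX_FRAME_MAX is (2 * 1024) as defined in aq_cfg.h:

```c
/* Standalone sketch of the single-buffer XDP MTU check; the constant
 * values below mirror aq_cfg.h and <linux/if_ether.h> and are assumptions
 * of this illustration, not part of the patch.
 */
#include <stdio.h>

#define ETH_HLEN            14
#define ETH_FCS_LEN         4
#define AQ_CFG_RX_FRAME_MAX (2 * 1024)

int main(void)
{
	for (int mtu = 1500; mtu <= 2100; mtu += 100) {
		int frame = mtu + ETH_HLEN + ETH_FCS_LEN;

		printf("MTU %4d -> frame %4d: %s\n", mtu, frame,
		       frame > AQ_CFG_RX_FRAME_MAX ?
		       "needs xdp_has_frags" : "fits one RX buffer");
	}
	return 0;
}
```

With these values the cutoff lands at an MTU of 2030; anything larger is rejected unless the program was loaded with BPF_F_XDP_HAS_FRAGS.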
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 24d715c28a355..e11cc29d3264c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -569,6 +569,103 @@ int aq_nic_start(struct aq_nic_s *self)
 	return err;
 }
 
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+				   struct xdp_frame *xdpf,
+				   struct aq_ring_s *ring)
+{
+	struct device *dev = aq_nic_get_dev(self);
+	struct aq_ring_buff_s *first = NULL;
+	unsigned int dx = ring->sw_tail;
+	struct aq_ring_buff_s *dx_buff;
+	struct skb_shared_info *sinfo;
+	unsigned int frag_count = 0U;
+	unsigned int nr_frags = 0U;
+	unsigned int ret = 0U;
+	u16 total_len;
+
+	dx_buff = &ring->buff_ring[dx];
+	dx_buff->flags = 0U;
+
+	sinfo = xdp_get_shared_info_from_frame(xdpf);
+	total_len = xdpf->len;
+	dx_buff->len = total_len;
+	if (xdp_frame_has_frags(xdpf)) {
+		nr_frags = sinfo->nr_frags;
+		total_len += sinfo->xdp_frags_size;
+	}
+	dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+				     DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+		goto exit;
+
+	first = dx_buff;
+	dx_buff->len_pkt = total_len;
+	dx_buff->is_sop = 1U;
+	dx_buff->is_mapped = 1U;
+	++ret;
+
+	for (; nr_frags--; ++frag_count) {
+		skb_frag_t *frag = &sinfo->frags[frag_count];
+		unsigned int frag_len = skb_frag_size(frag);
+		unsigned int buff_offset = 0U;
+		unsigned int buff_size = 0U;
+		dma_addr_t frag_pa;
+
while (frag_len) { + if (frag_len > AQ_CFG_TX_FRAME_MAX) + buff_size = AQ_CFG_TX_FRAME_MAX; + else + buff_size = frag_len; + + frag_pa = skb_frag_dma_map(dev, frag, buff_offset, + buff_size, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(dev, frag_pa))) + goto mapping_error; + + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + + dx_buff->flags = 0U; + dx_buff->len = buff_size; + dx_buff->pa = frag_pa; + dx_buff->is_mapped = 1U; + dx_buff->eop_index = 0xffffU; + + frag_len -= buff_size; + buff_offset += buff_size; + + ++ret; + } + } + + first->eop_index = dx; + dx_buff->is_eop = 1U; + dx_buff->skb = NULL; + dx_buff->xdpf = xdpf; + goto exit; + +mapping_error: + for (dx = ring->sw_tail; + ret > 0; + --ret, dx = aq_ring_next_dx(ring, dx)) { + dx_buff = &ring->buff_ring[dx]; + + if (!dx_buff->pa) + continue; + if (unlikely(dx_buff->is_sop)) + dma_unmap_single(dev, dx_buff->pa, dx_buff->len, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dx_buff->pa, dx_buff->len, + DMA_TO_DEVICE); + } + +exit: + return ret; +} + unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring) { @@ -697,6 +794,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, first->eop_index = dx; dx_buff->is_eop = 1U; dx_buff->skb = skb; + dx_buff->xdpf = NULL; goto exit; mapping_error: @@ -725,6 +823,44 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, return ret; } +int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring, + struct xdp_frame *xdpf) +{ + u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx); + struct net_device *ndev = aq_nic_get_ndev(aq_nic); + struct skb_shared_info *sinfo; + int cpu = smp_processor_id(); + int err = NETDEV_TX_BUSY; + struct netdev_queue *nq; + unsigned int frags = 1; + + if (xdp_frame_has_frags(xdpf)) { + sinfo = xdp_get_shared_info_from_frame(xdpf); + frags += sinfo->nr_frags; + } + + if (frags > AQ_CFG_SKB_FRAGS_MAX) + return err; + + nq = netdev_get_tx_queue(ndev, tx_ring->idx); + __netif_tx_lock(nq, cpu); + + aq_ring_update_queue_state(tx_ring); + + /* Above status update may stop the queue. Check this. 
*/
+	if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+		goto out;
+
+	frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+	if (likely(frags))
+		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+							 frags);
+out:
+	__netif_tx_unlock(nq);
+
+	return err;
+}
+
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
 {
 	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 1a7148041e3dc..935ba889bd9a8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -11,6 +11,8 @@
 #define AQ_NIC_H
 
 #include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/filter.h>
 
 #include "aq_common.h"
 #include "aq_rss.h"
@@ -128,6 +130,7 @@ struct aq_nic_s {
 	struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
 	struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
 	struct aq_hw_s *aq_hw;
+	struct bpf_prog *xdp_prog;
 	struct net_device *ndev;
 	unsigned int aq_vecs;
 	unsigned int packet_filter;
@@ -177,6 +180,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
 int aq_nic_start(struct aq_nic_s *self);
 unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
 			    struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+		     struct xdp_frame *xdpf);
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
 int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
 int aq_nic_get_regs_count(struct aq_nic_s *self);
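For the RX changes that follow, it helps to see how the XDP layout divides an order-2 page: each buffer slot reserves AQ_XDP_HEADROOM in front of the frame and AQ_XDP_TAILROOM (room for the skb_shared_info) behind it, and aq_get_rxpages() flips pg_off forward one full slot at a time until the next slot would overrun the compound page. A rough standalone sketch; the headroom/tailroom values below assume x86-64 with 4K pages and are illustrative assumptions, not ABI:

```c
/* Rough layout math for the XDP RX pages set up in aq_ring_rx_alloc().
 * Assumed values: 4K pages, XDP_PACKET_HEADROOM-derived headroom of 256,
 * and a 320-byte SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
 */
#include <stdio.h>

#define PAGE_SIZE		4096
#define AQ_CFG_XDP_PAGEORDER	2	/* order-2: 16K compound page */
#define AQ_CFG_RX_FRAME_MAX	(2 * 1024)
#define AQ_XDP_HEADROOM		256
#define AQ_XDP_TAILROOM		320

int main(void)
{
	unsigned int page_len = PAGE_SIZE << AQ_CFG_XDP_PAGEORDER;
	unsigned int pg_off = AQ_XDP_HEADROOM;	/* initial page_offset */
	unsigned int slots = 0;

	/* Mirrors the reuse test in aq_get_rxpages(): a slot fits while
	 * pg_off + frame_max + tail_size stays inside the compound page.
	 */
	while (pg_off + AQ_CFG_RX_FRAME_MAX + AQ_XDP_TAILROOM <= page_len) {
		slots++;
		pg_off += AQ_CFG_RX_FRAME_MAX + AQ_XDP_HEADROOM +
			  AQ_XDP_TAILROOM;
	}
	printf("%u XDP buffers per order-%d page\n",
	       slots, AQ_CFG_XDP_PAGEORDER);
	return 0;
}
```

Under these assumptions a 16K page yields six slots; a page is reallocated only once its slots are exhausted or another user still holds a reference.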
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 77e76c9efd32f..ea740210803f1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -7,15 +7,37 @@
 
 /* File aq_ring.c: Definition of functions for Rx/Tx rings. */
 
-#include "aq_ring.h"
 #include "aq_nic.h"
 #include "aq_hw.h"
 #include "aq_hw_utils.h"
 #include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+			       struct xdp_buff *xdp)
+{
+	struct skb_shared_info *sinfo;
+	int i;
+
+	if (xdp_buff_has_frags(xdp)) {
+		sinfo = xdp_get_shared_info_from_buff(xdp);
+
+		for (i = 0; i < sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &sinfo->frags[i];
+
+			page_ref_inc(skb_frag_page(frag));
+		}
+	}
+	page_ref_inc(buff->rxdata.page);
+}
+
 static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
 {
 	unsigned int len = PAGE_SIZE << rxpage->order;
@@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
 	rxpage->page = NULL;
 }
 
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
-			 struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
 {
+	struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+	unsigned int order = rx_ring->page_order;
 	struct page *page;
 	int ret = -ENOMEM;
 	dma_addr_t daddr;
@@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
 	rxpage->page = page;
 	rxpage->daddr = daddr;
 	rxpage->order = order;
-	rxpage->pg_off = 0;
+	rxpage->pg_off = rx_ring->page_offset;
 
 	return 0;
 
@@ -58,21 +81,26 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
 	return ret;
 }
 
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
-			  int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
 {
+	unsigned int order = self->page_order;
+	u16 page_offset = self->page_offset;
+	u16 frame_max = self->frame_max;
+	u16 tail_size = self->tail_size;
 	int ret;
 
 	if (rxbuf->rxdata.page) {
 		/* One means ring is the only user and can reuse */
 		if (page_ref_count(rxbuf->rxdata.page) > 1) {
 			/* Try reuse buffer */
-			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
-			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
-			    (PAGE_SIZE << order)) {
+			rxbuf->rxdata.pg_off += frame_max + page_offset +
+						tail_size;
+			if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+			    (PAGE_SIZE << order)) {
 				u64_stats_update_begin(&self->stats.rx.syncp);
 				self->stats.rx.pg_flips++;
 				u64_stats_update_end(&self->stats.rx.syncp);
+
 			} else {
 				/* Buffer exhausted.
We have other users and * should release this page and realloc @@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, u64_stats_update_end(&self->stats.rx.syncp); } } else { - rxbuf->rxdata.pg_off = 0; + rxbuf->rxdata.pg_off = page_offset; u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_reuses++; u64_stats_update_end(&self->stats.rx.syncp); @@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, } if (!rxbuf->rxdata.page) { - ret = aq_get_rxpage(&rxbuf->rxdata, order, - aq_nic_get_dev(self->aq_nic)); + ret = aq_alloc_rxpages(&rxbuf->rxdata, self); if (ret) { u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.alloc_fails++; @@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, err = -ENOMEM; goto err_exit; } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), self->size * self->dx_size, &self->dx_ring_pa, GFP_KERNEL); @@ -172,11 +200,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, self->idx = idx; self->size = aq_nic_cfg->rxds; self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; - self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + - (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1; - - if (aq_nic_cfg->rxpageorder > self->page_order) - self->page_order = aq_nic_cfg->rxpageorder; + self->xdp_prog = aq_nic->xdp_prog; + self->frame_max = AQ_CFG_RX_FRAME_MAX; + + /* Only order-2 is allowed if XDP is enabled */ + if (READ_ONCE(self->xdp_prog)) { + self->page_offset = AQ_XDP_HEADROOM; + self->page_order = AQ_CFG_XDP_PAGEORDER; + self->tail_size = AQ_XDP_TAILROOM; + } else { + self->page_offset = 0; + self->page_order = fls(self->frame_max / PAGE_SIZE + + (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1; + if (aq_nic_cfg->rxpageorder > self->page_order) + self->page_order = aq_nic_cfg->rxpageorder; + self->tail_size = 0; + } self = aq_ring_alloc(self, aq_nic); if (!self) { @@ -298,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } } - if (unlikely(buff->is_eop && buff->skb)) { + if (likely(!buff->is_eop)) + goto out; + + if (buff->skb) { u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; u64_stats_update_end(&self->stats.tx.syncp); - dev_kfree_skb_any(buff->skb); - buff->skb = NULL; + } else if (buff->xdpf) { + u64_stats_update_begin(&self->stats.tx.syncp); + ++self->stats.tx.packets; + self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf); + u64_stats_update_end(&self->stats.tx.syncp); + xdp_return_frame_rx_napi(buff->xdpf); } + +out: + buff->skb = NULL; + buff->xdpf = NULL; buff->pa = 0U; buff->eop_index = 0xffffU; self->sw_head = aq_ring_next_dx(self, self->sw_head); @@ -339,11 +389,159 @@ static void aq_rx_checksum(struct aq_ring_s *self, __skb_incr_checksum_unnecessary(skb); } -#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -int aq_ring_rx_clean(struct aq_ring_s *self, - struct napi_struct *napi, - int *work_done, - int budget) +int aq_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct aq_nic_s *aq_nic = netdev_priv(dev); + unsigned int vec, i, drop = 0; + int cpu = smp_processor_id(); + struct aq_nic_cfg_s *aq_cfg; + struct aq_ring_s *ring; + + aq_cfg = aq_nic_get_cfg(aq_nic); + vec = cpu % aq_cfg->vecs; + ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)]; + + for (i = 0; i < num_frames; i++) { + struct xdp_frame *xdpf = frames[i]; + + if (aq_nic_xmit_xdpf(aq_nic, ring, 
xdpf) == NETDEV_TX_BUSY) + drop++; + } + + return num_frames - drop; +} + +static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, + struct xdp_buff *xdp, + struct aq_ring_s *rx_ring, + struct aq_ring_buff_s *buff) +{ + int result = NETDEV_TX_BUSY; + struct aq_ring_s *tx_ring; + struct xdp_frame *xdpf; + struct bpf_prog *prog; + u32 act = XDP_ABORTED; + struct sk_buff *skb; + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp); + u64_stats_update_end(&rx_ring->stats.rx.syncp); + + prog = READ_ONCE(rx_ring->xdp_prog); + if (!prog) + goto pass; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + /* single buffer XDP program, but packet is multi buffer, aborted */ + if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags) + goto out_aborted; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: +pass: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev); + if (!skb) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_pass; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + return skb; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + tx_ring = aq_nic->aq_ring_tx[rx_ring->idx]; + result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf); + if (result == NETDEV_TX_BUSY) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_tx; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0) + goto out_aborted; + xdp_do_flush(); + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_redirect; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + default: + fallthrough; + case XDP_ABORTED: +out_aborted: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + trace_xdp_exception(aq_nic->ndev, prog, act); + bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act); + break; + case XDP_DROP: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_drop; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + break; + } + + return ERR_PTR(-result); +} + +static bool aq_add_rx_fragment(struct device *dev, + struct aq_ring_s *ring, + struct aq_ring_buff_s *buff, + struct xdp_buff *xdp) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + struct aq_ring_buff_s *buff_ = buff; + + memset(sinfo, 0, sizeof(*sinfo)); + do { + skb_frag_t *frag; + + if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) + return true; + + frag = &sinfo->frags[sinfo->nr_frags++]; + buff_ = &ring->buff_ring[buff_->next]; + dma_sync_single_range_for_cpu(dev, + buff_->rxdata.daddr, + buff_->rxdata.pg_off, + buff_->len, + DMA_FROM_DEVICE); + skb_frag_off_set(frag, buff_->rxdata.pg_off); + skb_frag_size_set(frag, buff_->len); + sinfo->xdp_frags_size += buff_->len; + __skb_frag_set_page(frag, buff_->rxdata.page); + + buff_->is_cleaned = 1; + + buff->is_ip_cso &= buff_->is_ip_cso; + buff->is_udp_cso &= buff_->is_udp_cso; + buff->is_tcp_cso &= buff_->is_tcp_cso; + buff->is_cso_err |= buff_->is_cso_err; + + if (page_is_pfmemalloc(buff_->rxdata.page)) + xdp_buff_set_frag_pfmemalloc(xdp); + + } while 
(!buff_->is_eop); + + xdp_buff_set_frags_flag(xdp); + + return false; +} + +static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, + int *work_done, int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); bool is_rsc_completed = true; @@ -449,7 +647,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, skb_add_rx_frag(skb, 0, buff->rxdata.page, buff->rxdata.pg_off + hdr_len, buff->len - hdr_len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff->rxdata.page); } @@ -469,7 +667,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, buff_->rxdata.page, buff_->rxdata.pg_off, buff_->len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff_->rxdata.page); buff_->is_cleaned = 1; @@ -510,6 +708,149 @@ int aq_ring_rx_clean(struct aq_ring_s *self, return err; } +static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, + struct napi_struct *napi, int *work_done, + int budget) +{ + int frame_sz = rx_ring->page_offset + rx_ring->frame_max + + rx_ring->tail_size; + struct aq_nic_s *aq_nic = rx_ring->aq_nic; + bool is_rsc_completed = true; + struct device *dev; + int err = 0; + + dev = aq_nic_get_dev(aq_nic); + for (; (rx_ring->sw_head != rx_ring->hw_head) && budget; + rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head]; + bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring); + struct aq_ring_buff_s *buff_ = NULL; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + struct xdp_buff xdp; + void *hard_start; + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + is_rsc_completed = + aq_ring_dx_in_range(rx_ring->sw_head, + next_, + rx_ring->hw_head); + + if (unlikely(!is_rsc_completed)) + break; + + buff->is_error |= buff_->is_error; + buff->is_cso_err |= buff_->is_cso_err; + } while (!buff_->is_eop); + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + if (buff->is_error || + (buff->is_lro && buff->is_cso_err)) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + + buff_->is_cleaned = true; + } while (!buff_->is_eop); + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + if (buff->is_error) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + + dma_sync_single_range_for_cpu(dev, + buff->rxdata.daddr, + buff->rxdata.pg_off, + buff->len, DMA_FROM_DEVICE); + hard_start = page_address(buff->rxdata.page) + + buff->rxdata.pg_off - rx_ring->page_offset; + + if (is_ptp_ring) + buff->len -= + aq_ptp_extract_ts(rx_ring->aq_nic, skb, + aq_buf_vaddr(&buff->rxdata), + buff->len); + + xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset, + buff->len, false); + if (!buff->is_eop) { + if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + skb = aq_xdp_run_prog(aq_nic, &xdp, 
rx_ring, buff); + if (IS_ERR(skb) || !skb) + continue; + + if (buff->is_vlan) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + buff->vlan_rx_tag); + + aq_rx_checksum(rx_ring, buff, skb); + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + /* Send all PTP traffic to 0 queue */ + skb_record_rx_queue(skb, + is_ptp_ring ? 0 + : AQ_NIC_RING2QMAP(rx_ring->aq_nic, + rx_ring->idx)); + + napi_gro_receive(napi, skb); + } + +err_exit: + return err; +} + +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget) +{ + if (static_branch_unlikely(&aq_xdp_locking_key)) + return __aq_ring_xdp_clean(self, napi, work_done, budget); + else + return __aq_ring_rx_clean(self, napi, work_done, budget); +} + void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) { #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) @@ -529,7 +870,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) int aq_ring_rx_fill(struct aq_ring_s *self) { - unsigned int page_order = self->page_order; struct aq_ring_buff_s *buff = NULL; int err = 0; int i = 0; @@ -543,9 +883,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff = &self->buff_ring[self->sw_tail]; buff->flags = 0U; - buff->len = AQ_CFG_RX_FRAME_MAX; + buff->len = self->frame_max; - err = aq_get_rxpages(self, buff, page_order); + err = aq_get_rxpages(self, buff); if (err) goto err_exit; @@ -600,6 +940,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) data[++count] = self->stats.rx.alloc_fails; data[++count] = self->stats.rx.skb_alloc_fails; data[++count] = self->stats.rx.polls; + data[++count] = self->stats.rx.pg_flips; + data[++count] = self->stats.rx.pg_reuses; + data[++count] = self->stats.rx.pg_losts; + data[++count] = self->stats.rx.xdp_aborted; + data[++count] = self->stats.rx.xdp_drop; + data[++count] = self->stats.rx.xdp_pass; + data[++count] = self->stats.rx.xdp_tx; + data[++count] = self->stats.rx.xdp_invalid; + data[++count] = self->stats.rx.xdp_redirect; } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); } else { /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 93659e58f1ce1..0a6c34438c1d0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -11,6 +11,10 @@ #define AQ_RING_H #include "aq_common.h" +#include "aq_vec.h" + +#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) struct page; struct aq_nic_cfg_s; @@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s { struct { dma_addr_t pa_eop; struct sk_buff *skb; + struct xdp_frame *xdpf; }; /* TxC */ struct { @@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s { u64 pg_losts; u64 pg_flips; u64 pg_reuses; + u64 xdp_aborted; + u64 xdp_drop; + u64 xdp_pass; + u64 xdp_tx; + u64 xdp_invalid; + u64 xdp_redirect; }; struct aq_ring_stats_tx_s { @@ -132,10 +143,15 @@ struct aq_ring_s { unsigned int size; /* descriptors number */ unsigned int dx_size; /* TX or RX descriptor size, */ /* stored here for fater math */ - unsigned int page_order; + u16 page_order; + u16 page_offset; + u16 frame_max; + u16 tail_size; union aq_ring_stats_s stats; dma_addr_t dx_ring_pa; + struct bpf_prog *xdp_prog; enum atl_ring_type ring_type; + struct xdp_rxq_info xdp_rxq; }; struct 
aq_ring_param_s {
@@ -175,6 +191,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 				   struct aq_nic_s *aq_nic,
 				   unsigned int idx,
 				   struct aq_nic_cfg_s *aq_nic_cfg);
+
 int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
 void aq_ring_rx_deinit(struct aq_ring_s *self);
 void aq_ring_free(struct aq_ring_s *self);
@@ -182,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring);
 void aq_ring_queue_wake(struct aq_ring_s *ring);
 void aq_ring_queue_stop(struct aq_ring_s *ring);
 bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+		struct xdp_frame **frames, u32 flags);
 int aq_ring_rx_clean(struct aq_ring_s *self,
 		     struct napi_struct *napi,
 		     int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 6ab1f3212d246..e839e1002ec78 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -10,11 +10,6 @@
  */
 
 #include "aq_vec.h"
-#include "aq_nic.h"
-#include "aq_ring.h"
-#include "aq_hw.h"
-
-#include <linux/netdevice.h>
 
 struct aq_vec_s {
 	const struct aq_hw_ops *aq_hw_ops;
@@ -153,9 +148,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
 
 		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
 
+		if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+				     aq_nic->ndev, idx,
+				     self->napi.napi_id) < 0) {
+			err = -ENOMEM;
+			goto err_exit;
+		}
+		if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+					       MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+			err = -ENOMEM;
+			goto err_exit;
+		}
+
 		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
 					idx_ring, aq_nic_cfg);
 		if (!ring) {
+			xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
 			err = -ENOMEM;
 			goto err_exit;
 		}
@@ -300,8 +309,10 @@ void aq_vec_ring_free(struct aq_vec_s *self)
 	for (i = 0U; self->tx_rings > i; ++i) {
 		ring = self->ring[i];
 		aq_ring_free(&ring[AQ_VEC_TX_ID]);
-		if (i < self->rx_rings)
+		if (i < self->rx_rings) {
+			xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
 			aq_ring_free(&ring[AQ_VEC_RX_ID]);
+		}
 	}
 
 	self->tx_rings = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 567f3d4b79a26..78fac609b71d6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -13,7 +13,13 @@
 #define AQ_VEC_H
 
 #include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
 #include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
 
 struct aq_hw_s;
 struct aq_hw_ops;
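The hw_atl changes below swap the compile-time AQ_CFG_RX_FRAME_MAX for the per-ring frame_max in the descriptor-length math. The EOP write-back reports the whole packet length, while every descriptor before it carried exactly frame_max bytes, so the last descriptor's payload is the remainder, and a zero remainder means the last buffer is completely full. A standalone check of that logic (frame_max of 2K is an assumption matching the non-XDP default):

```c
/* Why the EOP length is "pkt_len % frame_max, or frame_max if that is 0",
 * as used in hw_atl_a0/b0 below.
 */
#include <stdio.h>

static unsigned int eop_len(unsigned int pkt_len, unsigned int frame_max)
{
	unsigned int len = pkt_len % frame_max;

	return len ? len : frame_max;	/* exact multiple fills the buffer */
}

int main(void)
{
	unsigned int frame_max = 2 * 1024;
	unsigned int lens[] = { 60, 2048, 3000, 4096, 9000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("pkt_len %4u -> last descriptor carries %4u bytes\n",
		       lens[i], eop_len(lens[i], frame_max));
	return 0;
}
```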
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4625ccb794997..9dfd68f0fda95 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
 	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
 	hw_atl_rdm_rx_desc_data_buff_size_set(self,
-					      AQ_CFG_RX_FRAME_MAX / 1024U,
+					      aq_ring->frame_max / 1024U,
 					      aq_ring->idx);
 
 	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 		if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
 			buff->len = rxd_wb->pkt_len %
-				AQ_CFG_RX_FRAME_MAX;
+				ring->frame_max;
 			buff->len = buff->len ?
-				buff->len : AQ_CFG_RX_FRAME_MAX;
+				buff->len : ring->frame_max;
 			buff->next = 0U;
 			buff->is_eop = 1U;
 		} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759b..878a53abec33f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
 	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
 	hw_atl_rdm_rx_desc_data_buff_size_set(self,
-					      AQ_CFG_RX_FRAME_MAX / 1024U,
+					      aq_ring->frame_max / 1024U,
 					      aq_ring->idx);
 
 	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -969,15 +969,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
 				       rxd_wb->status);
 		if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
 			buff->len = rxd_wb->pkt_len %
-				AQ_CFG_RX_FRAME_MAX;
+				ring->frame_max;
 			buff->len = buff->len ?
-				buff->len : AQ_CFG_RX_FRAME_MAX;
+				buff->len : ring->frame_max;
 			buff->next = 0U;
 			buff->is_eop = 1U;
 		} else {
 			buff->len =
-				rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
-				AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+				rxd_wb->pkt_len > ring->frame_max ?
+				ring->frame_max : rxd_wb->pkt_len;
 
 			if (buff->is_lro) {
 				/* LRO */
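For testing the new data path end to end, a minimal XDP program is enough. This is a hypothetical example, not part of the patch; the SEC("xdp.frags") section name is the libbpf convention for loading with BPF_F_XDP_HAS_FRAGS, which is what the prog->aux->xdp_has_frags checks in aq_main.c react to:

```c
/* Minimal frags-aware XDP program (hypothetical test tool, not part of
 * the patch). Compile with clang -O2 -target bpf.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_pass_all(struct xdp_md *ctx)
{
	/* Multi-buffer frames (MTU above the single-buffer cutoff, or LRO
	 * aggregates) only reach this program because the section name
	 * opts in to XDP frags.
	 */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```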
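A matching hypothetical loader (assumes libbpf >= 0.8 for bpf_xdp_attach()); XDP_FLAGS_DRV_MODE requests native mode, which exercises the new ndo_bpf/aq_xdp_setup() path rather than generic XDP:

```c
/* Hypothetical loader for the program above; error handling trimmed to
 * the essentials.
 */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int main(int argc, char **argv)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex, err;

	if (argc != 3) {
		fprintf(stderr, "usage: %s IFACE PROG.o\n", argv[0]);
		return 1;
	}

	ifindex = if_nametoindex(argv[1]);
	if (!ifindex)
		return 1;

	obj = bpf_object__open_file(argv[2], NULL);
	if (libbpf_get_error(obj) || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_pass_all");
	if (!prog)
		return 1;

	/* Attach in native ("drv") mode so the driver's ndo_bpf runs. */
	err = bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			     XDP_FLAGS_DRV_MODE, NULL);
	if (err)
		fprintf(stderr, "attach failed: %d\n", err);
	return err ? 1 : 0;
}
```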