diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 83c8908f0cc7d..309bd3e97ea00 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1884,8 +1884,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, bytes_compl += buf->skb->len; pkts_compl++; dev_kfree_skb_any(buf->skb); - } else if (buf->type == MVNETA_TYPE_XDP_TX || - buf->type == MVNETA_TYPE_XDP_NDO) { + } else if ((buf->type == MVNETA_TYPE_XDP_TX || + buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) { if (napi && buf->type == MVNETA_TYPE_XDP_TX) xdp_return_frame_rx_napi(buf->xdpf); else @@ -2060,61 +2060,106 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) static void mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, - struct xdp_buff *xdp, struct skb_shared_info *sinfo, - int sync_len) + struct xdp_buff *xdp, int sync_len) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); int i; + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + for (i = 0; i < sinfo->nr_frags; i++) page_pool_put_full_page(rxq->page_pool, skb_frag_page(&sinfo->frags[i]), true); + +out: page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), sync_len, true); } static int mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, - struct xdp_frame *xdpf, bool dma_map) + struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map) { - struct mvneta_tx_desc *tx_desc; - struct mvneta_tx_buf *buf; - dma_addr_t dma_addr; + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + struct device *dev = pp->dev->dev.parent; + struct mvneta_tx_desc *tx_desc = NULL; + int i, num_frames = 1; + struct page *page; + + if (unlikely(xdp_frame_has_frags(xdpf))) + num_frames += sinfo->nr_frags; - if (txq->count >= txq->tx_stop_threshold) + if (txq->count + num_frames >= txq->size) return MVNETA_XDP_DROPPED; - tx_desc = mvneta_txq_next_desc_get(txq); + for (i = 0; i < num_frames; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + skb_frag_t *frag = NULL; + int len = xdpf->len; + dma_addr_t dma_addr; - buf = &txq->buf[txq->txq_put_index]; - if (dma_map) { - /* ndo_xdp_xmit */ - dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data, - xdpf->len, DMA_TO_DEVICE); - if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) { - mvneta_txq_desc_put(txq); - return MVNETA_XDP_DROPPED; + if (unlikely(i)) { /* paged area */ + frag = &sinfo->frags[i - 1]; + len = skb_frag_size(frag); } - buf->type = MVNETA_TYPE_XDP_NDO; - } else { - struct page *page = virt_to_page(xdpf->data); - dma_addr = page_pool_get_dma_addr(page) + - sizeof(*xdpf) + xdpf->headroom; - dma_sync_single_for_device(pp->dev->dev.parent, dma_addr, - xdpf->len, DMA_BIDIRECTIONAL); - buf->type = MVNETA_TYPE_XDP_TX; + tx_desc = mvneta_txq_next_desc_get(txq); + if (dma_map) { + /* ndo_xdp_xmit */ + void *data; + + data = unlikely(frag) ? skb_frag_address(frag) + : xdpf->data; + dma_addr = dma_map_single(dev, data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + mvneta_txq_desc_put(txq); + goto unmap; + } + + buf->type = MVNETA_TYPE_XDP_NDO; + } else { + page = unlikely(frag) ? 
skb_frag_page(frag) + : virt_to_page(xdpf->data); + dma_addr = page_pool_get_dma_addr(page); + if (unlikely(frag)) + dma_addr += skb_frag_off(frag); + else + dma_addr += sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(dev, dma_addr, len, + DMA_BIDIRECTIONAL); + buf->type = MVNETA_TYPE_XDP_TX; + } + buf->xdpf = unlikely(i) ? NULL : xdpf; + + tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC; + tx_desc->buf_phys_addr = dma_addr; + tx_desc->data_size = len; + *nxmit_byte += len; + + mvneta_txq_inc_put(txq); } - buf->xdpf = xdpf; - tx_desc->command = MVNETA_TXD_FLZ_DESC; - tx_desc->buf_phys_addr = dma_addr; - tx_desc->data_size = xdpf->len; + /*last descriptor */ + if (likely(tx_desc)) + tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; - mvneta_txq_inc_put(txq); - txq->pending++; - txq->count++; + txq->pending += num_frames; + txq->count += num_frames; return MVNETA_XDP_TX; + +unmap: + for (i--; i >= 0; i--) { + mvneta_txq_desc_put(txq); + tx_desc = txq->descs + txq->next_desc_to_proc; + dma_unmap_single(dev, tx_desc->buf_phys_addr, + tx_desc->data_size, + DMA_TO_DEVICE); + } + + return MVNETA_XDP_DROPPED; } static int @@ -2123,8 +2168,8 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct mvneta_tx_queue *txq; struct netdev_queue *nq; + int cpu, nxmit_byte = 0; struct xdp_frame *xdpf; - int cpu; u32 ret; xdpf = xdp_convert_buff_to_frame(xdp); @@ -2136,10 +2181,10 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) nq = netdev_get_tx_queue(pp->dev, txq->id); __netif_tx_lock(nq, cpu); - ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false); + ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false); if (ret == MVNETA_XDP_TX) { u64_stats_update_begin(&stats->syncp); - stats->es.ps.tx_bytes += xdpf->len; + stats->es.ps.tx_bytes += nxmit_byte; stats->es.ps.tx_packets++; stats->es.ps.xdp_tx++; u64_stats_update_end(&stats->syncp); @@ -2178,11 +2223,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame, __netif_tx_lock(nq, cpu); for (i = 0; i < num_frame; i++) { - ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true); + ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte, + true); if (ret != MVNETA_XDP_TX) break; - nxmit_byte += frames[i]->len; nxmit++; } @@ -2205,7 +2250,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp, u32 frame_sz, struct mvneta_stats *stats) { - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); unsigned int len, data_len, sync; u32 ret, act; @@ -2226,7 +2270,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, err = xdp_do_redirect(pp->dev, xdp, prog); if (unlikely(err)) { - mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); + mvneta_xdp_put_buff(pp, rxq, xdp, sync); ret = MVNETA_XDP_DROPPED; } else { ret = MVNETA_XDP_REDIR; @@ -2237,7 +2281,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, case XDP_TX: ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) - mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); + mvneta_xdp_put_buff(pp, rxq, xdp, sync); break; default: bpf_warn_invalid_xdp_action(pp->dev, prog, act); @@ -2246,7 +2290,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, trace_xdp_exception(pp->dev, prog, act); fallthrough; case XDP_DROP: - mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); + mvneta_xdp_put_buff(pp, rxq, xdp, sync); ret = MVNETA_XDP_DROPPED; 
stats->xdp_drop++; break; @@ -2269,7 +2313,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, int data_len = -MVNETA_MH_SIZE, len; struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; - struct skb_shared_info *sinfo; if (*size > MVNETA_MAX_RX_BUF_SIZE) { len = MVNETA_MAX_RX_BUF_SIZE; @@ -2289,11 +2332,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, /* Prefetch header */ prefetch(data); + xdp_buff_clear_frags_flag(xdp); xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, data_len, false); - - sinfo = xdp_get_shared_info_from_buff(xdp); - sinfo->nr_frags = 0; } static void @@ -2301,9 +2342,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, int *size, - struct skb_shared_info *xdp_sinfo, struct page *page) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; int data_len, len; @@ -2321,25 +2362,25 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, len, dma_dir); rx_desc->buf_phys_addr = 0; - if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) { - skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++]; + if (!xdp_buff_has_frags(xdp)) + sinfo->nr_frags = 0; + + if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { + skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++]; skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_size_set(frag, data_len); __skb_frag_set_page(frag, page); + + if (!xdp_buff_has_frags(xdp)) { + sinfo->xdp_frags_size = *size; + xdp_buff_set_frags_flag(xdp); + } + if (page_is_pfmemalloc(page)) + xdp_buff_set_frag_pfmemalloc(xdp); } else { page_pool_put_full_page(rxq->page_pool, page, true); } - - /* last fragment */ - if (len == *size) { - struct skb_shared_info *sinfo; - - sinfo = xdp_get_shared_info_from_buff(xdp); - sinfo->nr_frags = xdp_sinfo->nr_frags; - memcpy(sinfo->frags, xdp_sinfo->frags, - sinfo->nr_frags * sizeof(skb_frag_t)); - } *size -= len; } @@ -2348,8 +2389,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, struct xdp_buff *xdp, u32 desc_status) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); - int i, num_frags = sinfo->nr_frags; struct sk_buff *skb; + u8 num_frags; + + if (unlikely(xdp_buff_has_frags(xdp))) + num_frags = sinfo->nr_frags; skb = build_skb(xdp->data_hard_start, PAGE_SIZE); if (!skb) @@ -2361,13 +2405,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, skb_put(skb, xdp->data_end - xdp->data); skb->ip_summed = mvneta_rx_csum(pp, desc_status); - for (i = 0; i < num_frags; i++) { - skb_frag_t *frag = &sinfo->frags[i]; - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - skb_frag_page(frag), skb_frag_off(frag), - skb_frag_size(frag), PAGE_SIZE); - } + if (unlikely(xdp_buff_has_frags(xdp))) + xdp_update_skb_shared_info(skb, num_frags, + sinfo->xdp_frags_size, + num_frags * xdp->frame_sz, + xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } @@ -2379,7 +2421,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, { int rx_proc = 0, rx_todo, refill, size = 0; struct net_device *dev = pp->dev; - struct skb_shared_info sinfo; struct mvneta_stats ps = {}; struct bpf_prog *xdp_prog; u32 desc_status, frame_sz; @@ -2388,8 +2429,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); xdp_buf.data_hard_start = NULL; - sinfo.nr_frags = 0; - /* Get number of received packets */ rx_todo = 
mvneta_rxq_busy_desc_num_get(pp, rxq); @@ -2431,7 +2470,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, } mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, - &size, &sinfo, page); + &size, page); } /* Middle or Last descriptor */ if (!(rx_status & MVNETA_RXD_LAST_DESC)) @@ -2439,7 +2478,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, continue; if (size) { - mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); goto next; } @@ -2451,7 +2490,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, if (IS_ERR(skb)) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); - mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); u64_stats_update_begin(&stats->syncp); stats->es.skb_alloc_error++; @@ -2468,11 +2507,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi, napi_gro_receive(napi, skb); next: xdp_buf.data_hard_start = NULL; - sinfo.nr_frags = 0; } if (xdp_buf.data_hard_start) - mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); if (ps.xdp_redirect) xdp_do_flush_map(); @@ -3260,7 +3298,8 @@ static int mvneta_create_page_pool(struct mvneta_port *pp, return err; } - err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0); + err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0, + PAGE_SIZE); if (err < 0) goto err_free_pp; @@ -3740,6 +3779,7 @@ static void mvneta_percpu_disable(void *arg) static int mvneta_change_mtu(struct net_device *dev, int mtu) { struct mvneta_port *pp = netdev_priv(dev); + struct bpf_prog *prog = pp->xdp_prog; int ret; if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { @@ -3748,8 +3788,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); } - if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) { - netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu); + if (prog && !prog->aux->xdp_has_frags && + mtu > MVNETA_MAX_RX_BUF_SIZE) { + netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n", + mtu); + return -EINVAL; } @@ -4490,8 +4533,9 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, struct mvneta_port *pp = netdev_priv(dev); struct bpf_prog *old_prog; - if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { - NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP"); + if (prog && !prog->aux->xdp_has_frags && + dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags"); return -EOPNOTSUPP; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 80e3387ea3afc..e8ec8d2f2fe37 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -194,6 +194,17 @@ struct bpf_map { struct work_struct work; struct mutex freeze_mutex; atomic64_t writecnt; + /* 'Ownership' of program-containing map is claimed by the first program + * that is going to use this map or by the first program which FD is + * stored in the map to make sure that all callers and callees have the + * same prog type, JITed flag and xdp_has_frags flag. 
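+	 * Reads and updates of these owner fields are serialized by owner.lock
+	 * (see bpf_prog_map_compatible() and bpf_map_show_fdinfo() below).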
+ */ + struct { + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) @@ -933,6 +944,7 @@ struct bpf_prog_aux { bool func_proto_unreliable; bool sleepable; bool tail_call_reachable; + bool xdp_has_frags; struct hlist_node tramp_hlist; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; @@ -993,16 +1005,6 @@ struct bpf_prog_aux { }; struct bpf_array_aux { - /* 'Ownership' of prog array is claimed by the first program that - * is going to use this map or by the first program which FD is - * stored in the map to make sure that all callers and callees have - * the same prog type and JITed flag. - */ - struct { - spinlock_t lock; - enum bpf_prog_type type; - bool jited; - } owner; /* Programs with direct jumps into programs part of this array. */ struct list_head poke_progs; struct bpf_map *map; @@ -1177,7 +1179,14 @@ struct bpf_event_entry { struct rcu_head rcu; }; -bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +static inline bool map_type_contains_progs(struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || + map->map_type == BPF_MAP_TYPE_DEVMAP || + map->map_type == BPF_MAP_TYPE_CPUMAP; +} + +bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); int bpf_prog_calc_tag(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bf11e1fbd69b6..8131d0de7559c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -557,6 +557,7 @@ struct skb_shared_info { * Warning : all fields before dataref are cleared in __alloc_skb() */ atomic_t dataref; + unsigned int xdp_frags_size; /* Intermediate layers must ensure that destructor_arg * remains valid until skb destructor */ diff --git a/include/net/xdp.h b/include/net/xdp.h index 8f0812e4996da..b7721c3e4d1fa 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -60,12 +60,20 @@ struct xdp_rxq_info { u32 reg_state; struct xdp_mem_info mem; unsigned int napi_id; + u32 frag_size; } ____cacheline_aligned; /* perf critical, avoid false-sharing */ struct xdp_txq_info { struct net_device *dev; }; +enum xdp_buff_flags { + XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */ + XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under + * pressure + */ +}; + struct xdp_buff { void *data; void *data_end; @@ -74,13 +82,40 @@ struct xdp_buff { struct xdp_rxq_info *rxq; struct xdp_txq_info *txq; u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/ + u32 flags; /* supported values defined in xdp_buff_flags */ }; +static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp) +{ + return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS); +} + +static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp) +{ + xdp->flags |= XDP_FLAGS_HAS_FRAGS; +} + +static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp) +{ + xdp->flags &= ~XDP_FLAGS_HAS_FRAGS; +} + +static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp) +{ + return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC); +} + +static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp) +{ + xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC; +} + static __always_inline void xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq) { xdp->frame_sz = frame_sz; xdp->rxq = rxq; + xdp->flags = 
0; } static __always_inline void @@ -111,6 +146,20 @@ xdp_get_shared_info_from_buff(struct xdp_buff *xdp) return (struct skb_shared_info *)xdp_data_hard_end(xdp); } +static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp) +{ + unsigned int len = xdp->data_end - xdp->data; + struct skb_shared_info *sinfo; + + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + + sinfo = xdp_get_shared_info_from_buff(xdp); + len += sinfo->xdp_frags_size; +out: + return len; +} + struct xdp_frame { void *data; u16 len; @@ -122,8 +171,19 @@ struct xdp_frame { */ struct xdp_mem_info mem; struct net_device *dev_rx; /* used by cpumap */ + u32 flags; /* supported values defined in xdp_buff_flags */ }; +static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame) +{ + return !!(frame->flags & XDP_FLAGS_HAS_FRAGS); +} + +static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame) +{ + return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC); +} + #define XDP_BULK_QUEUE_SIZE 16 struct xdp_frame_bulk { int count; @@ -159,6 +219,19 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame) frame->dev_rx = NULL; } +static inline void +xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags, + unsigned int size, unsigned int truesize, + bool pfmemalloc) +{ + skb_shinfo(skb)->nr_frags = nr_frags; + + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; + skb->pfmemalloc |= pfmemalloc; +} + /* Avoids inlining WARN macro in fast-path */ void xdp_warn(const char *msg, const char *func, const int line); #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__) @@ -180,6 +253,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) xdp->data_end = frame->data + frame->len; xdp->data_meta = frame->data - frame->metasize; xdp->frame_sz = frame->frame_sz; + xdp->flags = frame->flags; } static inline @@ -206,6 +280,7 @@ int xdp_update_frame_from_buff(struct xdp_buff *xdp, xdp_frame->headroom = headroom - sizeof(*xdp_frame); xdp_frame->metasize = metasize; xdp_frame->frame_sz = xdp->frame_sz; + xdp_frame->flags = xdp->flags; return 0; } @@ -230,6 +305,8 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) return xdp_frame; } +void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, + struct xdp_buff *xdp); void xdp_return_frame(struct xdp_frame *xdpf); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); void xdp_return_buff(struct xdp_buff *xdp); @@ -246,14 +323,37 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem); static inline void xdp_release_frame(struct xdp_frame *xdpf) { struct xdp_mem_info *mem = &xdpf->mem; + struct skb_shared_info *sinfo; + int i; /* Curr only page_pool needs this */ - if (mem->type == MEM_TYPE_PAGE_POOL) - __xdp_release_frame(xdpf->data, mem); + if (mem->type != MEM_TYPE_PAGE_POOL) + return; + + if (likely(!xdp_frame_has_frags(xdpf))) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_release_frame(page_address(page), mem); + } +out: + __xdp_release_frame(xdpf->data, mem); +} + +int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index, + unsigned int napi_id, u32 frag_size); +static inline int +xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index, + unsigned int napi_id) +{ + return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0); } -int 
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, - struct net_device *dev, u32 queue_index, unsigned int napi_id); void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index fe2272defcd95..16a7574292a53 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1113,6 +1113,11 @@ enum bpf_link_type { */ #define BPF_F_SLEEPABLE (1U << 4) +/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program + * fully support xdp frags. + */ +#define BPF_F_XDP_HAS_FRAGS (1U << 5) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -5049,6 +5054,28 @@ union bpf_attr { * This helper is currently supported by cgroup programs only. * Return * 0 on success, or a negative error in case of failure. + * + * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) + * Description + * Get the total size of a given xdp buff (linear and paged area) + * Return + * The total size of a given xdp buffer. + * + * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * This helper is provided as an easy way to load data from a + * xdp buffer. It can be used to load *len* bytes from *offset* from + * the frame associated to *xdp_md*, into the buffer pointed by + * *buf*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * Store *len* bytes from buffer *buf* into the frame + * associated to *xdp_md*, at *offset*. + * Return + * 0 on success, or a negative error in case of failure. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5239,6 +5266,9 @@ union bpf_attr { FN(get_func_arg_cnt), \ FN(get_retval), \ FN(set_retval), \ + FN(xdp_get_buff_len), \ + FN(xdp_load_bytes), \ + FN(xdp_store_bytes), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index c7a5be3bf8bea..7f145aefbff87 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -837,13 +837,12 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key) static void *prog_fd_array_get_ptr(struct bpf_map *map, struct file *map_file, int fd) { - struct bpf_array *array = container_of(map, struct bpf_array, map); struct bpf_prog *prog = bpf_prog_get(fd); if (IS_ERR(prog)) return prog; - if (!bpf_prog_array_compatible(array, prog)) { + if (!bpf_prog_map_compatible(map, prog)) { bpf_prog_put(prog); return ERR_PTR(-EINVAL); } @@ -1071,7 +1070,6 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) INIT_WORK(&aux->work, prog_array_map_clear_deferred); INIT_LIST_HEAD(&aux->poke_progs); mutex_init(&aux->poke_mutex); - spin_lock_init(&aux->owner.lock); map = array_map_alloc(attr); if (IS_ERR(map)) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index de3e5bc6781fe..0a1cfd8544b9c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1829,28 +1829,30 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx, } #endif -bool bpf_prog_array_compatible(struct bpf_array *array, - const struct bpf_prog *fp) +bool bpf_prog_map_compatible(struct bpf_map *map, + const struct bpf_prog *fp) { bool ret; if (fp->kprobe_override) return false; - spin_lock(&array->aux->owner.lock); - - if (!array->aux->owner.type) { + spin_lock(&map->owner.lock); + if (!map->owner.type) { /* There's no owner yet where we could check for * compatibility. 
*/ - array->aux->owner.type = fp->type; - array->aux->owner.jited = fp->jited; + map->owner.type = fp->type; + map->owner.jited = fp->jited; + map->owner.xdp_has_frags = fp->aux->xdp_has_frags; ret = true; } else { - ret = array->aux->owner.type == fp->type && - array->aux->owner.jited == fp->jited; + ret = map->owner.type == fp->type && + map->owner.jited == fp->jited && + map->owner.xdp_has_frags == fp->aux->xdp_has_frags; } - spin_unlock(&array->aux->owner.lock); + spin_unlock(&map->owner.lock); + return ret; } @@ -1862,13 +1864,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) mutex_lock(&aux->used_maps_mutex); for (i = 0; i < aux->used_map_cnt; i++) { struct bpf_map *map = aux->used_maps[i]; - struct bpf_array *array; - if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) + if (!map_type_contains_progs(map)) continue; - array = container_of(map, struct bpf_array, map); - if (!bpf_prog_array_compatible(array, fp)) { + if (!bpf_prog_map_compatible(map, fp)) { ret = -EINVAL; goto out; } diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index b3e6b94222380..650e5d21f90d0 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -397,7 +397,8 @@ static int cpu_map_kthread_run(void *data) return 0; } -static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) +static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, + struct bpf_map *map, int fd) { struct bpf_prog *prog; @@ -405,7 +406,8 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) if (IS_ERR(prog)) return PTR_ERR(prog); - if (prog->expected_attach_type != BPF_XDP_CPUMAP) { + if (prog->expected_attach_type != BPF_XDP_CPUMAP || + !bpf_prog_map_compatible(map, prog)) { bpf_prog_put(prog); return -EINVAL; } @@ -457,7 +459,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, rcpu->map_id = map->id; rcpu->value.qsize = value->qsize; - if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) + if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd)) goto free_ptr_ring; /* Setup kthread */ diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index fe019dbdb3f0a..038f6d7a83e43 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -858,7 +858,8 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net, BPF_PROG_TYPE_XDP, false); if (IS_ERR(prog)) goto err_put_dev; - if (prog->expected_attach_type != BPF_XDP_DEVMAP) + if (prog->expected_attach_type != BPF_XDP_DEVMAP || + !bpf_prog_map_compatible(&dtab->map, prog)) goto err_put_prog; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9e0631f091a65..72ce1edde950d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -556,16 +556,14 @@ static unsigned long bpf_map_memory_footprint(const struct bpf_map *map) static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) { - const struct bpf_map *map = filp->private_data; - const struct bpf_array *array; + struct bpf_map *map = filp->private_data; u32 type = 0, jited = 0; - if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { - array = container_of(map, struct bpf_array, map); - spin_lock(&array->aux->owner.lock); - type = array->aux->owner.type; - jited = array->aux->owner.jited; - spin_unlock(&array->aux->owner.lock); + if (map_type_contains_progs(map)) { + spin_lock(&map->owner.lock); + type = map->owner.type; + jited = map->owner.jited; + spin_unlock(&map->owner.lock); } seq_printf(m, @@ -874,6 +872,7 @@ static int map_create(union bpf_attr *attr) atomic64_set(&map->refcnt, 1); 
atomic64_set(&map->usercnt, 1); mutex_init(&map->freeze_mutex); + spin_lock_init(&map->owner.lock); map->spin_lock_off = -EINVAL; map->timer_off = -EINVAL; @@ -2217,7 +2216,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) BPF_F_ANY_ALIGNMENT | BPF_F_TEST_STATE_FREQ | BPF_F_SLEEPABLE | - BPF_F_TEST_RND_HI32)) + BPF_F_TEST_RND_HI32 | + BPF_F_XDP_HAS_FRAGS)) return -EINVAL; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && @@ -2303,6 +2303,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) prog->aux->dst_prog = dst_prog; prog->aux->offload_requested = !!attr->prog_ifindex; prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; + prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; err = security_bpf_prog_alloc(prog->aux); if (err) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 21aa30644219e..06a9e220069ea 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1562,6 +1562,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { extern const struct bpf_func_proto bpf_skb_output_proto; extern const struct bpf_func_proto bpf_xdp_output_proto; +extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, struct bpf_map *, map, u64, flags) @@ -1661,6 +1662,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sock_from_file_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_ptr_cookie_proto; + case BPF_FUNC_xdp_get_buff_len: + return &bpf_xdp_get_buff_len_trace_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 3a5bf8ad834dd..541933abd16f8 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -131,7 +131,8 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, static int bpf_test_finish(const union bpf_attr *kattr, union bpf_attr __user *uattr, const void *data, - u32 size, u32 retval, u32 duration) + struct skb_shared_info *sinfo, u32 size, + u32 retval, u32 duration) { void __user *data_out = u64_to_user_ptr(kattr->test.data_out); int err = -EFAULT; @@ -146,8 +147,36 @@ static int bpf_test_finish(const union bpf_attr *kattr, err = -ENOSPC; } - if (data_out && copy_to_user(data_out, data, copy_size)) - goto out; + if (data_out) { + int len = sinfo ? 
copy_size - sinfo->xdp_frags_size : copy_size; + + if (copy_to_user(data_out, data, len)) + goto out; + + if (sinfo) { + int i, offset = len, data_len; + + for (i = 0; i < sinfo->nr_frags; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + + if (offset >= copy_size) { + err = -ENOSPC; + break; + } + + data_len = min_t(int, copy_size - offset, + skb_frag_size(frag)); + + if (copy_to_user(data_out + offset, + skb_frag_address(frag), + data_len)) + goto out; + + offset += data_len; + } + } + } + if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size))) goto out; if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval))) @@ -367,11 +396,10 @@ BTF_SET_START(test_sk_ret_null_kfunc_ids) BTF_ID(func, bpf_kfunc_call_test_acquire) BTF_SET_END(test_sk_ret_null_kfunc_ids) -static void *bpf_test_init(const union bpf_attr *kattr, u32 size, - u32 headroom, u32 tailroom) +static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, + u32 size, u32 headroom, u32 tailroom) { void __user *data_in = u64_to_user_ptr(kattr->test.data_in); - u32 user_size = kattr->test.data_size_in; void *data; if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom) @@ -699,7 +727,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, if (kattr->test.flags || kattr->test.cpu) return -EINVAL; - data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN, + data = bpf_test_init(kattr, kattr->test.data_size_in, + size, NET_SKB_PAD + NET_IP_ALIGN, SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); if (IS_ERR(data)) return PTR_ERR(data); @@ -801,7 +830,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, /* bpf program can never convert linear skb to non-linear */ if (WARN_ON_ONCE(skb_is_nonlinear(skb))) size = skb_headlen(skb); - ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration); + ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval, + duration); if (!ret) ret = bpf_ctx_finish(kattr, uattr, ctx, sizeof(struct __sk_buff)); @@ -876,16 +906,16 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - u32 headroom = XDP_PACKET_HEADROOM; u32 size = kattr->test.data_size_in; + u32 headroom = XDP_PACKET_HEADROOM; + u32 retval, duration, max_data_sz; u32 repeat = kattr->test.repeat; struct netdev_rx_queue *rxqueue; + struct skb_shared_info *sinfo; struct xdp_buff xdp = {}; - u32 retval, duration; + int i, ret = -EINVAL; struct xdp_md *ctx; - u32 max_data_sz; void *data; - int ret = -EINVAL; if (prog->expected_attach_type == BPF_XDP_DEVMAP || prog->expected_attach_type == BPF_XDP_CPUMAP) @@ -905,26 +935,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, headroom -= ctx->data; } - /* XDP have extra tailroom as (most) drivers use full page */ max_data_sz = 4096 - headroom - tailroom; + size = min_t(u32, size, max_data_sz); - data = bpf_test_init(kattr, max_data_sz, headroom, tailroom); + data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom); if (IS_ERR(data)) { ret = PTR_ERR(data); goto free_ctx; } rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); - xdp_init_buff(&xdp, headroom + max_data_sz + tailroom, - &rxqueue->xdp_rxq); + rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom; + xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq); xdp_prepare_buff(&xdp, data, headroom, size, true); + sinfo = 
xdp_get_shared_info_from_buff(&xdp); ret = xdp_convert_md_to_buff(ctx, &xdp); if (ret) goto free_data; + if (unlikely(kattr->test.data_size_in > size)) { + void __user *data_in = u64_to_user_ptr(kattr->test.data_in); + + while (size < kattr->test.data_size_in) { + struct page *page; + skb_frag_t *frag; + int data_len; + + page = alloc_page(GFP_KERNEL); + if (!page) { + ret = -ENOMEM; + goto out; + } + + frag = &sinfo->frags[sinfo->nr_frags++]; + __skb_frag_set_page(frag, page); + + data_len = min_t(int, kattr->test.data_size_in - size, + PAGE_SIZE); + skb_frag_size_set(frag, data_len); + + if (copy_from_user(page_address(page), data_in + size, + data_len)) { + ret = -EFAULT; + goto out; + } + sinfo->xdp_frags_size += data_len; + size += data_len; + } + xdp_buff_set_frags_flag(&xdp); + } + if (repeat > 1) bpf_prog_change_xdp(NULL, prog); + ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); /* We convert the xdp_buff back to an xdp_md before checking the return * code so the reference count of any held netdevice will be decremented @@ -934,12 +998,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, if (ret) goto out; - if (xdp.data_meta != data + headroom || - xdp.data_end != xdp.data_meta + size) - size = xdp.data_end - xdp.data_meta; - - ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval, - duration); + size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size; + ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size, + retval, duration); if (!ret) ret = bpf_ctx_finish(kattr, uattr, ctx, sizeof(struct xdp_md)); @@ -948,6 +1009,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, if (repeat > 1) bpf_prog_change_xdp(prog, NULL); free_data: + for (i = 0; i < sinfo->nr_frags; i++) + __free_page(skb_frag_page(&sinfo->frags[i])); kfree(data); free_ctx: kfree(ctx); @@ -994,7 +1057,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, if (size < ETH_HLEN) return -EINVAL; - data = bpf_test_init(kattr, size, 0, 0); + data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0); if (IS_ERR(data)) return PTR_ERR(data); @@ -1029,8 +1092,8 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, if (ret < 0) goto out; - ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys), - retval, duration); + ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL, + sizeof(flow_keys), retval, duration); if (!ret) ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(struct bpf_flow_keys)); @@ -1134,7 +1197,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat user_ctx->cookie = sock_gen_cookie(ctx.selected_sk); } - ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration); + ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration); if (!ret) ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx)); diff --git a/net/core/filter.c b/net/core/filter.c index f73a84c75970e..a06931c27eebe 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3783,6 +3783,28 @@ static const struct bpf_func_proto sk_skb_change_head_proto = { .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; + +BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) +{ + return xdp_get_buff_len(xdp); +} + +static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = { + .func = bpf_xdp_get_buff_len, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +BTF_ID_LIST_SINGLE(bpf_xdp_get_buff_len_bpf_ids, struct, xdp_buff) + +const 
struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto = { + .func = bpf_xdp_get_buff_len, + .gpl_only = false, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &bpf_xdp_get_buff_len_bpf_ids[0], +}; + static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) { return xdp_data_meta_unsupported(xdp) ? 0 : @@ -3817,11 +3839,208 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .arg2_type = ARG_ANYTHING, }; +static void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, + void *buf, unsigned long len, bool flush) +{ + unsigned long ptr_len, ptr_off = 0; + skb_frag_t *next_frag, *end_frag; + struct skb_shared_info *sinfo; + void *src, *dst; + u8 *ptr_buf; + + if (likely(xdp->data_end - xdp->data >= off + len)) { + src = flush ? buf : xdp->data + off; + dst = flush ? xdp->data + off : buf; + memcpy(dst, src, len); + return; + } + + sinfo = xdp_get_shared_info_from_buff(xdp); + end_frag = &sinfo->frags[sinfo->nr_frags]; + next_frag = &sinfo->frags[0]; + + ptr_len = xdp->data_end - xdp->data; + ptr_buf = xdp->data; + + while (true) { + if (off < ptr_off + ptr_len) { + unsigned long copy_off = off - ptr_off; + unsigned long copy_len = min(len, ptr_len - copy_off); + + src = flush ? buf : ptr_buf + copy_off; + dst = flush ? ptr_buf + copy_off : buf; + memcpy(dst, src, copy_len); + + off += copy_len; + len -= copy_len; + buf += copy_len; + } + + if (!len || next_frag == end_frag) + break; + + ptr_off += ptr_len; + ptr_buf = skb_frag_address(next_frag); + ptr_len = skb_frag_size(next_frag); + next_frag++; + } +} + +static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + u32 size = xdp->data_end - xdp->data; + void *addr = xdp->data; + int i; + + if (unlikely(offset > 0xffff || len > 0xffff)) + return ERR_PTR(-EFAULT); + + if (offset + len > xdp_get_buff_len(xdp)) + return ERR_PTR(-EINVAL); + + if (offset < size) /* linear area */ + goto out; + + offset -= size; + for (i = 0; i < sinfo->nr_frags; i++) { /* paged area */ + u32 frag_size = skb_frag_size(&sinfo->frags[i]); + + if (offset < frag_size) { + addr = skb_frag_address(&sinfo->frags[i]); + size = frag_size; + break; + } + offset -= frag_size; + } +out: + return offset + len < size ? 
addr + offset : NULL; +} + +BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset, + void *, buf, u32, len) +{ + void *ptr; + + ptr = bpf_xdp_pointer(xdp, offset, len); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + if (!ptr) + bpf_xdp_copy_buf(xdp, offset, buf, len, false); + else + memcpy(buf, ptr, len); + + return 0; +} + +static const struct bpf_func_proto bpf_xdp_load_bytes_proto = { + .func = bpf_xdp_load_bytes, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, +}; + +BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset, + void *, buf, u32, len) +{ + void *ptr; + + ptr = bpf_xdp_pointer(xdp, offset, len); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + if (!ptr) + bpf_xdp_copy_buf(xdp, offset, buf, len, true); + else + memcpy(ptr, buf, len); + + return 0; +} + +static const struct bpf_func_proto bpf_xdp_store_bytes_proto = { + .func = bpf_xdp_store_bytes, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, +}; + +static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1]; + struct xdp_rxq_info *rxq = xdp->rxq; + unsigned int tailroom; + + if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz) + return -EOPNOTSUPP; + + tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag); + if (unlikely(offset > tailroom)) + return -EINVAL; + + memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset); + skb_frag_size_add(frag, offset); + sinfo->xdp_frags_size += offset; + + return 0; +} + +static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + int i, n_frags_free = 0, len_free = 0; + + if (unlikely(offset > (int)xdp_get_buff_len(xdp) - ETH_HLEN)) + return -EINVAL; + + for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) { + skb_frag_t *frag = &sinfo->frags[i]; + int shrink = min_t(int, offset, skb_frag_size(frag)); + + len_free += shrink; + offset -= shrink; + + if (skb_frag_size(frag) == shrink) { + struct page *page = skb_frag_page(frag); + + __xdp_return(page_address(page), &xdp->rxq->mem, + false, NULL); + n_frags_free++; + } else { + skb_frag_size_sub(frag, shrink); + break; + } + } + sinfo->nr_frags -= n_frags_free; + sinfo->xdp_frags_size -= len_free; + + if (unlikely(!sinfo->nr_frags)) { + xdp_buff_clear_frags_flag(xdp); + xdp->data_end -= offset; + } + + return 0; +} + BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) { void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */ void *data_end = xdp->data_end + offset; + if (unlikely(xdp_buff_has_frags(xdp))) { /* non-linear xdp buff */ + if (offset < 0) + return bpf_xdp_frags_shrink_tail(xdp, -offset); + + return bpf_xdp_frags_increase_tail(xdp, offset); + } + /* Notice that xdp_data_hard_end have reserved some tailroom */ if (unlikely(data_end > data_hard_end)) return -EINVAL; @@ -4047,6 +4266,14 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); enum bpf_map_type map_type = ri->map_type; + /* XDP_REDIRECT is not fully supported yet for xdp frags since + * not all XDP capable drivers can map non-linear 
xdp_frame in + * ndo_xdp_xmit. + */ + if (unlikely(xdp_buff_has_frags(xdp) && + map_type != BPF_MAP_TYPE_CPUMAP)) + return -EOPNOTSUPP; + if (map_type == BPF_MAP_TYPE_XSKMAP) return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog); @@ -4590,10 +4817,12 @@ static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = { }; #endif -static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, +static unsigned long bpf_xdp_copy(void *dst, const void *ctx, unsigned long off, unsigned long len) { - memcpy(dst_buff, src_buff + off, len); + struct xdp_buff *xdp = (struct xdp_buff *)ctx; + + bpf_xdp_copy_buf(xdp, off, dst, len, false); return 0; } @@ -4604,11 +4833,11 @@ BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; - if (unlikely(!xdp || - xdp_size > (unsigned long)(xdp->data_end - xdp->data))) + + if (unlikely(!xdp || xdp_size > xdp_get_buff_len(xdp))) return -EFAULT; - return bpf_event_output(map, flags, meta, meta_size, xdp->data, + return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size, bpf_xdp_copy); } @@ -7533,6 +7762,12 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_redirect_map_proto; case BPF_FUNC_xdp_adjust_tail: return &bpf_xdp_adjust_tail_proto; + case BPF_FUNC_xdp_get_buff_len: + return &bpf_xdp_get_buff_len_proto; + case BPF_FUNC_xdp_load_bytes: + return &bpf_xdp_load_bytes_proto; + case BPF_FUNC_xdp_store_bytes: + return &bpf_xdp_store_bytes_proto; case BPF_FUNC_fib_lookup: return &bpf_xdp_fib_lookup_proto; case BPF_FUNC_check_mtu: diff --git a/net/core/xdp.c b/net/core/xdp.c index 7aba355049862..361df312ee7f5 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -162,8 +162,9 @@ static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq) } /* Returns 0 on success, negative on failure */ -int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, - struct net_device *dev, u32 queue_index, unsigned int napi_id) +int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index, + unsigned int napi_id, u32 frag_size) { if (!dev) { WARN(1, "Missing net_device from driver"); @@ -185,11 +186,12 @@ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, xdp_rxq->dev = dev; xdp_rxq->queue_index = queue_index; xdp_rxq->napi_id = napi_id; + xdp_rxq->frag_size = frag_size; xdp_rxq->reg_state = REG_STATE_REGISTERED; return 0; } -EXPORT_SYMBOL_GPL(xdp_rxq_info_reg); +EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg); void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq) { @@ -369,8 +371,8 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model); * is used for those calls sites. Thus, allowing for faster recycling * of xdp_frames/pages in those cases. 
*/ -static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, - struct xdp_buff *xdp) +void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, + struct xdp_buff *xdp) { struct xdp_mem_allocator *xa; struct page *page; @@ -406,12 +408,38 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, void xdp_return_frame(struct xdp_frame *xdpf) { + struct skb_shared_info *sinfo; + int i; + + if (likely(!xdp_frame_has_frags(xdpf))) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_return(page_address(page), &xdpf->mem, false, NULL); + } +out: __xdp_return(xdpf->data, &xdpf->mem, false, NULL); } EXPORT_SYMBOL_GPL(xdp_return_frame); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) { + struct skb_shared_info *sinfo; + int i; + + if (likely(!xdp_frame_has_frags(xdpf))) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_return(page_address(page), &xdpf->mem, true, NULL); + } +out: __xdp_return(xdpf->data, &xdpf->mem, true, NULL); } EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi); @@ -447,7 +475,7 @@ void xdp_return_frame_bulk(struct xdp_frame *xdpf, struct xdp_mem_allocator *xa; if (mem->type != MEM_TYPE_PAGE_POOL) { - __xdp_return(xdpf->data, &xdpf->mem, false, NULL); + xdp_return_frame(xdpf); return; } @@ -466,12 +494,38 @@ void xdp_return_frame_bulk(struct xdp_frame *xdpf, bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); } + if (unlikely(xdp_frame_has_frags(xdpf))) { + struct skb_shared_info *sinfo; + int i; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + + bq->q[bq->count++] = skb_frag_address(frag); + if (bq->count == XDP_BULK_QUEUE_SIZE) + xdp_flush_frame_bulk(bq); + } + } bq->q[bq->count++] = xdpf->data; } EXPORT_SYMBOL_GPL(xdp_return_frame_bulk); void xdp_return_buff(struct xdp_buff *xdp) { + struct skb_shared_info *sinfo; + int i; + + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + + sinfo = xdp_get_shared_info_from_buff(xdp); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_return(page_address(page), &xdp->rxq->mem, true, xdp); + } +out: __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp); } @@ -561,8 +615,14 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct sk_buff *skb, struct net_device *dev) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); unsigned int headroom, frame_size; void *hard_start; + u8 nr_frags; + + /* xdp frags frame */ + if (unlikely(xdp_frame_has_frags(xdpf))) + nr_frags = sinfo->nr_frags; /* Part of headroom was reserved to xdpf */ headroom = sizeof(*xdpf) + xdpf->headroom; @@ -582,6 +642,12 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, if (xdpf->metasize) skb_metadata_set(skb, xdpf->metasize); + if (unlikely(xdp_frame_has_frags(xdpf))) + xdp_update_skb_shared_info(skb, nr_frags, + sinfo->xdp_frags_size, + nr_frags * xdpf->frame_sz, + xdp_frame_is_frag_pfmemalloc(xdpf)); + /* Essential SKB info: protocol and skb->dev */ skb->protocol = eth_type_trans(skb, dev); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index fe2272defcd95..16a7574292a53 100644 --- a/tools/include/uapi/linux/bpf.h +++ 
b/tools/include/uapi/linux/bpf.h @@ -1113,6 +1113,11 @@ enum bpf_link_type { */ #define BPF_F_SLEEPABLE (1U << 4) +/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program + * fully support xdp frags. + */ +#define BPF_F_XDP_HAS_FRAGS (1U << 5) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -5049,6 +5054,28 @@ union bpf_attr { * This helper is currently supported by cgroup programs only. * Return * 0 on success, or a negative error in case of failure. + * + * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) + * Description + * Get the total size of a given xdp buff (linear and paged area) + * Return + * The total size of a given xdp buffer. + * + * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * This helper is provided as an easy way to load data from a + * xdp buffer. It can be used to load *len* bytes from *offset* from + * the frame associated to *xdp_md*, into the buffer pointed by + * *buf*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * Store *len* bytes from buffer *buf* into the frame + * associated to *xdp_md*, at *offset*. + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5239,6 +5266,9 @@ union bpf_attr { FN(get_func_arg_cnt), \ FN(get_retval), \ FN(set_retval), \ + FN(xdp_get_buff_len), \ + FN(xdp_load_bytes), \ + FN(xdp_store_bytes), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fc6d530e20db9..a8c750373ad56 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -235,6 +235,8 @@ enum sec_def_flags { SEC_SLEEPABLE = 8, /* allow non-strict prefix matching */ SEC_SLOPPY_PFX = 16, + /* BPF program support non-linear XDP buffer */ + SEC_XDP_FRAGS = 32, }; struct bpf_sec_def { @@ -6570,6 +6572,9 @@ static int libbpf_preload_prog(struct bpf_program *prog, if (def & SEC_SLEEPABLE) opts->prog_flags |= BPF_F_SLEEPABLE; + if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) + opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; + if ((prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_LSM || prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { @@ -8608,8 +8613,11 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), + SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), + SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), + SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX), SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX), diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c new file mode 100644 index 0000000000000..31c188666e81c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +#include 
+#include + +void test_xdp_update_frags(void) +{ + const char *file = "./test_xdp_update_frags.o"; + __u32 duration, retval, size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, prog_fd; + __u32 *offset; + __u8 *buf; + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(128); + if (!ASSERT_OK_PTR(buf, "alloc buf 128b")) + goto out; + + memset(buf, 0, 128); + offset = (__u32 *)buf; + *offset = 16; + buf[*offset] = 0xaa; /* marker at offset 16 (head) */ + buf[*offset + 15] = 0xaa; /* marker at offset 31 (head) */ + + err = bpf_prog_test_run(prog_fd, 1, buf, 128, + buf, &size, &retval, &duration); + + /* test_xdp_update_frags: buf[16,31]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[16], 0xbb, "xdp_update_frag buf[16]"); + ASSERT_EQ(buf[31], 0xbb, "xdp_update_frag buf[31]"); + + free(buf); + + buf = malloc(9000); + if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) + goto out; + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 5000; + buf[*offset] = 0xaa; /* marker at offset 5000 (frag0) */ + buf[*offset + 15] = 0xaa; /* marker at offset 5015 (frag0) */ + + err = bpf_prog_test_run(prog_fd, 1, buf, 9000, + buf, &size, &retval, &duration); + + /* test_xdp_update_frags: buf[5000,5015]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[5000], 0xbb, "xdp_update_frag buf[5000]"); + ASSERT_EQ(buf[5015], 0xbb, "xdp_update_frag buf[5015]"); + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 3510; + buf[*offset] = 0xaa; /* marker at offset 3510 (head) */ + buf[*offset + 15] = 0xaa; /* marker at offset 3525 (frag0) */ + + err = bpf_prog_test_run(prog_fd, 1, buf, 9000, + buf, &size, &retval, &duration); + + /* test_xdp_update_frags: buf[3510,3525]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[3510], 0xbb, "xdp_update_frag buf[3510]"); + ASSERT_EQ(buf[3525], 0xbb, "xdp_update_frag buf[3525]"); + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 7606; + buf[*offset] = 0xaa; /* marker at offset 7606 (frag0) */ + buf[*offset + 15] = 0xaa; /* marker at offset 7621 (frag1) */ + + err = bpf_prog_test_run(prog_fd, 1, buf, 9000, + buf, &size, &retval, &duration); + + /* test_xdp_update_frags: buf[7606,7621]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[7606], 0xbb, "xdp_update_frag buf[7606]"); + ASSERT_EQ(buf[7621], 0xbb, "xdp_update_frag buf[7621]"); + + free(buf); +out: + bpf_object__close(obj); +} + +void test_xdp_adjust_frags(void) +{ + if (test__start_subtest("xdp_adjust_frags")) + test_xdp_update_frags(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index d5ebe92b0ebb0..ccc9e63254a89 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -118,6 +118,127 @@ static void test_xdp_adjust_tail_grow2(void) bpf_object__close(obj); } +void test_xdp_adjust_frags_tail_shrink(void) +{ + const char *file = "./test_xdp_adjust_tail_shrink.o"; + __u32 duration, retval, size, exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int 
err, prog_fd; + __u8 *buf; + + /* For the individual test cases, the first byte in the packet + * indicates which test will be run. + */ + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(9000); + if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) + goto out; + + memset(buf, 0, 9000); + + /* Test case removing 10 bytes from last frag, NOT freeing it */ + exp_size = 8990; /* 9000 - 10 */ + err = bpf_prog_test_run(prog_fd, 1, buf, 9000, + buf, &size, &retval, &duration); + + ASSERT_OK(err, "9Kb-10b"); + ASSERT_EQ(retval, XDP_TX, "9Kb-10b retval"); + ASSERT_EQ(size, exp_size, "9Kb-10b size"); + + /* Test case removing one of two pages, assuming 4K pages */ + buf[0] = 1; + exp_size = 4900; /* 9000 - 4100 */ + err = bpf_prog_test_run(prog_fd, 1, buf, 9000, + buf, &size, &retval, &duration); + + ASSERT_OK(err, "9Kb-4Kb"); + ASSERT_EQ(retval, XDP_TX, "9Kb-4Kb retval"); + ASSERT_EQ(size, exp_size, "9Kb-4Kb size"); + + /* Test case removing two pages resulting in a linear xdp_buff */ + buf[0] = 2; + exp_size = 800; /* 9000 - 8200 */ + err = bpf_prog_test_run(prog_fd, 1, buf, 9000, + buf, &size, &retval, &duration); + + ASSERT_OK(err, "9Kb-9Kb"); + ASSERT_EQ(retval, XDP_TX, "9Kb-9Kb retval"); + ASSERT_EQ(size, exp_size, "9Kb-9Kb size"); + + free(buf); +out: + bpf_object__close(obj); +} + +void test_xdp_adjust_frags_tail_grow(void) +{ + const char *file = "./test_xdp_adjust_tail_grow.o"; + __u32 duration, retval, size, exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, i, prog_fd; + __u8 *buf; + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(16384); + if (!ASSERT_OK_PTR(buf, "alloc buf 16Kb")) + goto out; + + /* Test case add 10 bytes to last frag */ + memset(buf, 1, 16384); + size = 9000; + exp_size = size + 10; + err = bpf_prog_test_run(prog_fd, 1, buf, size, + buf, &size, &retval, &duration); + + ASSERT_OK(err, "9Kb+10b"); + ASSERT_EQ(retval, XDP_TX, "9Kb+10b retval"); + ASSERT_EQ(size, exp_size, "9Kb+10b size"); + + for (i = 0; i < 9000; i++) + ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + + for (i = 9000; i < 9010; i++) + ASSERT_EQ(buf[i], 0, "9Kb+10b-new"); + + for (i = 9010; i < 16384; i++) + ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + + /* Test a too large grow */ + memset(buf, 1, 16384); + size = 9001; + exp_size = size; + err = bpf_prog_test_run(prog_fd, 1, buf, size, + buf, &size, &retval, &duration); + + ASSERT_OK(err, "9Kb+10b"); + ASSERT_EQ(retval, XDP_DROP, "9Kb+10b retval"); + ASSERT_EQ(size, exp_size, "9Kb+10b size"); + + free(buf); +out: + bpf_object__close(obj); +} + void test_xdp_adjust_tail(void) { if (test__start_subtest("xdp_adjust_tail_shrink")) @@ -126,4 +247,8 @@ void test_xdp_adjust_tail(void) test_xdp_adjust_tail_grow(); if (test__start_subtest("xdp_adjust_tail_grow2")) test_xdp_adjust_tail_grow2(); + if (test__start_subtest("xdp_adjust_frags_tail_shrink")) + test_xdp_adjust_frags_tail_shrink(); + if (test__start_subtest("xdp_adjust_frags_tail_grow")) + test_xdp_adjust_frags_tail_grow(); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c index 500a302cb3e99..9c395ea680c68 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c +++ 
@@ -10,28 +10,97 @@ struct meta {
 	int pkt_len;
 };
 
+struct test_ctx_s {
+	bool passed;
+	int pkt_size;
+};
+
+struct test_ctx_s test_ctx;
+
 static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 {
 	struct meta *meta = (struct meta *)data;
 	struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
+	unsigned char *raw_pkt = data + sizeof(*meta);
+	struct test_ctx_s *tst_ctx = ctx;
 
 	ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size");
 	ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex");
-	ASSERT_EQ(meta->pkt_len, sizeof(pkt_v4), "check_meta_pkt_len");
+	ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len");
 	ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0, "check_packet_content");
 
-	*(bool *)ctx = true;
+	if (meta->pkt_len > sizeof(pkt_v4)) {
+		for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++)
+			ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i,
+				  "check_packet_content");
+	}
+
+	tst_ctx->passed = true;
 }
 
-void test_xdp_bpf2bpf(void)
+#define BUF_SZ 9000
+
+static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb,
+				     struct test_xdp_bpf2bpf *ftrace_skel,
+				     int pkt_size)
 {
 	__u32 duration = 0, retval, size;
-	char buf[128];
+	__u8 *buf, *buf_in;
+	int err;
+
+	if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") ||
+	    !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))
+		return;
+
+	buf_in = malloc(BUF_SZ);
+	if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))
+		return;
+
+	buf = malloc(BUF_SZ);
+	if (!ASSERT_OK_PTR(buf, "buf malloc()")) {
+		free(buf_in);
+		return;
+	}
+
+	test_ctx.passed = false;
+	test_ctx.pkt_size = pkt_size;
+
+	memcpy(buf_in, &pkt_v4, sizeof(pkt_v4));
+	if (pkt_size > sizeof(pkt_v4)) {
+		for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++)
+			buf_in[i + sizeof(pkt_v4)] = i;
+	}
+
+	/* Run test program */
+	err = bpf_prog_test_run(pkt_fd, 1, buf_in, pkt_size,
+				buf, &size, &retval, &duration);
+
+	ASSERT_OK(err, "ipv4");
+	ASSERT_EQ(retval, XDP_PASS, "ipv4 retval");
+	ASSERT_EQ(size, pkt_size, "ipv4 size");
+
+	/* Make sure bpf_xdp_output() was triggered and it sent the expected
+	 * data to the perf ring buffer.
+	 */
+	err = perf_buffer__poll(pb, 100);
+
+	ASSERT_GE(err, 0, "perf_buffer__poll");
+	ASSERT_TRUE(test_ctx.passed, "test passed");
+	/* Verify test results */
+	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
+		  "fentry result");
+	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result");
+
+	free(buf);
+	free(buf_in);
+}
+
+void test_xdp_bpf2bpf(void)
+{
 	int err, pkt_fd, map_fd;
-	bool passed = false;
-	struct iphdr iph;
-	struct iptnl_info value4 = {.family = AF_INET};
+	int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200};
+	struct iptnl_info value4 = {.family = AF_INET6};
 	struct test_xdp *pkt_skel = NULL;
 	struct test_xdp_bpf2bpf *ftrace_skel = NULL;
 	struct vip key4 = {.protocol = 6, .family = AF_INET};
@@ -73,32 +142,14 @@ void test_xdp_bpf2bpf(void)
 		goto out;
 
 	/* Set up perf buffer */
-	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 1,
-			      on_sample, NULL, &passed, NULL);
+	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8,
+			      on_sample, NULL, &test_ctx, NULL);
 	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
 		goto out;
 
-	/* Run test program */
-	err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
-
-	ASSERT_OK(err, "ipv4");
-	ASSERT_EQ(retval, XDP_TX, "ipv4 retval");
-	ASSERT_EQ(size, 74, "ipv4 size");
-	ASSERT_EQ(iph.protocol, IPPROTO_IPIP, "ipv4 proto");
-
-	/* Make sure bpf_xdp_output() was triggered and it sent the expected
-	 * data to the perf ring buffer.
-	 */
-	err = perf_buffer__poll(pb, 100);
-
-	ASSERT_GE(err, 0, "perf_buffer__poll");
-	ASSERT_TRUE(passed, "test passed");
-	/* Verify test results */
-	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
-		  "fentry result");
-	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_TX, "fexit result");
+	for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++)
+		run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel,
+					 pkt_sizes[i]);
 
 out:
 	perf_buffer__free(pb);
 	test_xdp__destroy(pkt_skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
index abe9d9f988ec4..b353e1f3acb5b 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
@@ -3,11 +3,12 @@
 #include
 #include
+#include "test_xdp_with_cpumap_frags_helpers.skel.h"
 #include "test_xdp_with_cpumap_helpers.skel.h"
 
 #define IFINDEX_LO 1
 
-void serial_test_xdp_cpumap_attach(void)
+void test_xdp_with_cpumap_helpers(void)
 {
 	struct test_xdp_with_cpumap_helpers *skel;
 	struct bpf_prog_info info = {};
@@ -54,6 +55,67 @@ void serial_test_xdp_cpumap_attach(void)
 	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
 	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry");
 
+	/* Try to attach BPF_XDP program with frags to cpumap when we have
+	 * already loaded a BPF_XDP program on the map
+	 */
+	idx = 1;
+	val.qsize = 192;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry");
+
 out_close:
 	test_xdp_with_cpumap_helpers__destroy(skel);
 }
+
+void test_xdp_with_cpumap_frags_helpers(void)
+{
+	struct test_xdp_with_cpumap_frags_helpers *skel;
+	struct bpf_prog_info info = {};
+	__u32 len = sizeof(info);
+	struct bpf_cpumap_val val = {
+		.qsize = 192,
+	};
+	int err, frags_prog_fd, map_fd;
+	__u32 idx = 0;
+
+	skel = test_xdp_with_cpumap_frags_helpers__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))
+		return;
+
+	frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
+	map_fd = bpf_map__fd(skel->maps.cpu_map);
+	err = bpf_obj_get_info_by_fd(frags_prog_fd, &info, &len);
+	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+		goto out_close;
+
+	val.bpf_prog.fd = frags_prog_fd;
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_OK(err, "Add program to cpumap entry");
+
+	err = bpf_map_lookup_elem(map_fd, &idx, &val);
+	ASSERT_OK(err, "Read cpumap entry");
+	ASSERT_EQ(info.id, val.bpf_prog.id,
+		  "Match program id to cpumap entry prog_id");
+
+	/* Try to attach BPF_XDP program to cpumap when we have
+	 * already loaded a BPF_XDP program with frags on the map
+	 */
+	idx = 1;
+	val.qsize = 192;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program to cpumap entry");
+
+out_close:
+	test_xdp_with_cpumap_frags_helpers__destroy(skel);
+}
+
+void serial_test_xdp_cpumap_attach(void)
+{
+	if (test__start_subtest("CPUMAP with programs in entries"))
+		test_xdp_with_cpumap_helpers();
+
+	if (test__start_subtest("CPUMAP with frags programs in entries"))
+		test_xdp_with_cpumap_frags_helpers();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
index fc1a40c3193b6..463a72fc3e708 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
@@ -4,6 +4,7 @@
 #include
 
 #include "test_xdp_devmap_helpers.skel.h"
+#include "test_xdp_with_devmap_frags_helpers.skel.h"
 #include "test_xdp_with_devmap_helpers.skel.h"
 
 #define IFINDEX_LO 1
@@ -56,6 +57,15 @@ static void test_xdp_with_devmap_helpers(void)
 	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
 	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_DEVMAP program to devmap entry");
 
+	/* Try to attach BPF_XDP program with frags to devmap when we have
+	 * already loaded a BPF_XDP program on the map
+	 */
+	idx = 1;
+	val.ifindex = 1;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry");
+
 out_close:
 	test_xdp_with_devmap_helpers__destroy(skel);
 }
@@ -71,12 +81,57 @@ static void test_neg_xdp_devmap_helpers(void)
 	}
 }
 
+void test_xdp_with_devmap_frags_helpers(void)
+{
+	struct test_xdp_with_devmap_frags_helpers *skel;
+	struct bpf_prog_info info = {};
+	struct bpf_devmap_val val = {
+		.ifindex = IFINDEX_LO,
+	};
+	__u32 len = sizeof(info);
+	int err, dm_fd_frags, map_fd;
+	__u32 idx = 0;
+
+	skel = test_xdp_with_devmap_frags_helpers__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
+		return;
+
+	dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
+	map_fd = bpf_map__fd(skel->maps.dm_ports);
+	err = bpf_obj_get_info_by_fd(dm_fd_frags, &info, &len);
+	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+		goto out_close;
+
+	val.bpf_prog.fd = dm_fd_frags;
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_OK(err, "Add frags program to devmap entry");
+
+	err = bpf_map_lookup_elem(map_fd, &idx, &val);
+	ASSERT_OK(err, "Read devmap entry");
+	ASSERT_EQ(info.id, val.bpf_prog.id,
+		  "Match program id to devmap entry prog_id");
+
+	/* Try to attach BPF_XDP program to devmap when we have
+	 * already loaded a BPF_XDP program with frags on the map
+	 */
+	idx = 1;
+	val.ifindex = 1;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program to devmap entry");
+
+out_close:
+	test_xdp_with_devmap_frags_helpers__destroy(skel);
+}
 void serial_test_xdp_devmap_attach(void)
 {
 	if (test__start_subtest("DEVMAP with programs in entries"))
 		test_xdp_with_devmap_helpers();
 
+	if (test__start_subtest("DEVMAP with frags programs in entries"))
+		test_xdp_with_devmap_frags_helpers();
+
 	if (test__start_subtest("Verifier check of DEVMAP programs"))
 		test_neg_xdp_devmap_helpers();
 }
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
index 199c61b7d0628..53b64c9994500 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c
@@ -7,11 +7,10 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
 {
 	void *data_end = (void *)(long)xdp->data_end;
 	void *data = (void *)(long)xdp->data;
-	unsigned int data_len;
+	int data_len = bpf_xdp_get_buff_len(xdp);
 	int offset = 0;
 
 	/* Data length determine test case */
-	data_len = data_end - data;
 
 	if (data_len == 54) { /* sizeof(pkt_v4) */
 		offset = 4096; /* test too large offset */
@@ -20,7 +19,12 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp)
 	} else if (data_len == 64) {
 		offset = 128;
 	} else if (data_len == 128) {
-		offset = 4096 - 256 - 320 - data_len; /* Max tail grow 3520 */
+		/* Max tail grow 3520 */
+		offset = 4096 - 256 - 320 - data_len;
+	} else if (data_len == 9000) {
+		offset = 10;
+	} else if (data_len == 9001) {
+		offset = 4096;
 	} else {
 		return XDP_ABORTED; /* No matching test */
 	}
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
index b7448253d1359..ca68c038357c3 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c
@@ -12,14 +12,38 @@ SEC("xdp")
 int _xdp_adjust_tail_shrink(struct xdp_md *xdp)
 {
-	void *data_end = (void *)(long)xdp->data_end;
-	void *data = (void *)(long)xdp->data;
+	__u8 *data_end = (void *)(long)xdp->data_end;
+	__u8 *data = (void *)(long)xdp->data;
 	int offset = 0;
 
-	if (data_end - data == 54) /* sizeof(pkt_v4) */
+	switch (bpf_xdp_get_buff_len(xdp)) {
+	case 54:
+		/* sizeof(pkt_v4) */
 		offset = 256; /* shrink too much */
-	else
+		break;
+	case 9000:
+		/* non-linear buff test cases */
+		if (data + 1 > data_end)
+			return XDP_DROP;
+
+		switch (data[0]) {
+		case 0:
+			offset = 10;
+			break;
+		case 1:
+			offset = 4100;
+			break;
+		case 2:
+			offset = 8200;
+			break;
+		default:
+			return XDP_DROP;
+		}
+		break;
+	default:
 		offset = 20;
+		break;
+	}
 
 	if (bpf_xdp_adjust_tail(xdp, 0 - offset))
 		return XDP_DROP;
 	return XDP_TX;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
index 58cf4345f5cc9..3379d303f41a9 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
@@ -49,7 +49,7 @@ int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
 	void *data = (void *)(long)xdp->data;
 
 	meta.ifindex = xdp->rxq->dev->ifindex;
-	meta.pkt_len = data_end - data;
+	meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp);
 	bpf_xdp_output(xdp, &perf_buf_map,
 		       ((__u64) meta.pkt_len << 32) |
 		       BPF_F_CURRENT_CPU,
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_update_frags.c b/tools/testing/selftests/bpf/progs/test_xdp_update_frags.c
new file mode 100644
index 0000000000000..2a3496d8e3273
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_update_frags.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <bpf/bpf_helpers.h>
+
+int _version SEC("version") = 1;
+
+SEC("xdp.frags")
+int xdp_adjust_frags(struct xdp_md *xdp)
+{
+	__u8 *data_end = (void *)(long)xdp->data_end;
+	__u8 *data = (void *)(long)xdp->data;
+	__u8 val[16] = {};
+	__u32 offset;
+	int err;
+
+	if (data + sizeof(__u32) > data_end)
+		return XDP_DROP;
+
+	offset = *(__u32 *)data;
+	err = bpf_xdp_load_bytes(xdp, offset, val, sizeof(val));
+	if (err < 0)
+		return XDP_DROP;
+
+	if (val[0] != 0xaa || val[15] != 0xaa) /* marker */
+		return XDP_DROP;
+
+	val[0] = 0xbb; /* update the marker */
+	val[15] = 0xbb;
+	err = bpf_xdp_store_bytes(xdp, offset, val, sizeof(val));
+	if (err < 0)
+		return XDP_DROP;
+
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c
new file mode 100644
index 0000000000000..62fb7cd4d87ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define IFINDEX_LO 1
+
+struct {
+	__uint(type, BPF_MAP_TYPE_CPUMAP);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(struct bpf_cpumap_val));
+	__uint(max_entries, 4);
+} cpu_map SEC(".maps");
+
+SEC("xdp_cpumap/dummy_cm")
+int xdp_dummy_cm(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
+SEC("xdp.frags/cpumap")
+int xdp_dummy_cm_frags(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
index 532025057711e..48007f17dfa86 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
@@ -33,4 +33,10 @@ int xdp_dummy_cm(struct xdp_md *ctx)
 	return XDP_PASS;
 }
 
+SEC("xdp.frags/cpumap")
+int xdp_dummy_cm_frags(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c
new file mode 100644
index 0000000000000..e1caf510b7d28
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+	__uint(type, BPF_MAP_TYPE_DEVMAP);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(struct bpf_devmap_val));
+	__uint(max_entries, 4);
+} dm_ports SEC(".maps");
+
+/* valid program on DEVMAP entry via SEC name;
+ * has access to egress and ingress ifindex
+ */
+SEC("xdp_devmap/map_prog")
+int xdp_dummy_dm(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
+SEC("xdp.frags/devmap")
+int xdp_dummy_dm_frags(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
index 1e6b9c38ea6d9..8ae11fab8316d 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
@@ -40,4 +40,11 @@ int xdp_dummy_dm(struct xdp_md *ctx)
 	return XDP_PASS;
 }
 
+
+SEC("xdp.frags/devmap")
+int xdp_dummy_dm_frags(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
 char _license[] SEC("license") = "GPL";