diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 80c5cecaf2b56..55c6bce5da61e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -854,6 +854,10 @@ struct i40e_vsi {
 	u64 tx_force_wb;
 	u64 rx_buf_failed;
 	u64 rx_page_failed;
+	u64 rx_page_reuse;
+	u64 rx_page_alloc;
+	u64 rx_page_waive;
+	u64 rx_page_busy;
 
 	/* These are containers of ring pointers, allocated at run-time */
 	struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 091f36adbbe1e..e48499624d22c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -295,6 +295,10 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
 	I40E_VSI_STAT("tx_busy", tx_busy),
 	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+	I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+	I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
+	I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+	I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a95609d847ca7..9b7ce6d9a92b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -773,6 +773,7 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
  **/
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
+	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
 	struct i40e_pf *pf = vsi->back;
 	struct rtnl_link_stats64 *ons;
 	struct rtnl_link_stats64 *ns;   /* netdev stats */
@@ -780,7 +781,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	struct i40e_eth_stats *es;     /* device's eth stats */
 	u64 tx_restart, tx_busy;
 	struct i40e_ring *p;
-	u64 rx_page, rx_buf;
 	u64 bytes, packets;
 	unsigned int start;
 	u64 tx_linearize;
@@ -806,6 +806,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
 	rx_page = 0;
 	rx_buf = 0;
+	rx_reuse = 0;
+	rx_alloc = 0;
+	rx_waive = 0;
+	rx_busy = 0;
 	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		/* locate Tx ring */
@@ -839,6 +843,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		rx_p += packets;
 		rx_buf += p->rx_stats.alloc_buff_failed;
 		rx_page += p->rx_stats.alloc_page_failed;
+		rx_reuse += p->rx_stats.page_reuse_count;
+		rx_alloc += p->rx_stats.page_alloc_count;
+		rx_waive += p->rx_stats.page_waive_count;
+		rx_busy += p->rx_stats.page_busy_count;
 
 		if (i40e_enabled_xdp_vsi(vsi)) {
 			/* locate XDP ring */
@@ -866,6 +874,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->tx_force_wb = tx_force_wb;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
+	vsi->rx_page_reuse = rx_reuse;
+	vsi->rx_page_alloc = rx_alloc;
+	vsi->rx_page_waive = rx_waive;
+	vsi->rx_page_busy = rx_busy;
 
 	ns->rx_packets = rx_p;
 	ns->rx_bytes = rx_b;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 66cc79500c10d..a628f4b43fe8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1382,8 +1382,6 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 	new_buff->page_offset	= old_buff->page_offset;
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 
-	rx_ring->rx_stats.page_reuse_count++;
-
 	/* clear contents of buffer_info */
 	old_buff->page = NULL;
 }
@@ -1675,6 +1673,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 		return false;
 	}
 
+	rx_ring->rx_stats.page_alloc_count++;
+
 	/* map page for use */
 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
 				 i40e_rx_pg_size(rx_ring),
@@ -1982,32 +1982,43 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 /**
  * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buffer: buffer containing the page
+ * @rx_stats: rx stats structure for the rx ring
  * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
  *
  * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
  * which will assign the current buffer to the buffer that next_to_alloc is
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
+ * page freed.
+ *
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
  */
 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+				   struct i40e_rx_queue_stats *rx_stats,
 				   int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
 	/* Is any reuse possible? */
-	if (!dev_page_is_reusable(page))
+	if (!dev_page_is_reusable(page)) {
+		rx_stats->page_waive_count++;
 		return false;
+	}
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
 #else
 #define I40E_LAST_OFFSET \
 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
-	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+	if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
 #endif
 
 	/* If we have drained the page fragment pool we need to update
@@ -2237,7 +2248,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 			       struct i40e_rx_buffer *rx_buffer,
 			       int rx_buffer_pgcnt)
 {
-	if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+	if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
 		/* hand second half of page back to the ring */
 		i40e_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 88387a644cc38..324699ec930bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -298,6 +298,9 @@ struct i40e_rx_queue_stats {
 	u64 alloc_page_failed;
 	u64 alloc_buff_failed;
 	u64 page_reuse_count;
+	u64 page_alloc_count;
+	u64 page_waive_count;
+	u64 page_busy_count;
 };
 
 enum i40e_ring_state_t {
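
Note: taken together, the four counters classify every Rx page-recycle decision. A half-page buffer is either recycled (rx_cache_reuse), backed by a fresh page allocation (rx_cache_alloc), waived because dev_page_is_reusable() rejects the page outright, e.g. it sits on a remote NUMA node or came from a pfmemalloc reserve (rx_cache_waive), or still referenced elsewhere via its refcount or offset (rx_cache_busy). With the patch applied, the per-VSI totals are exposed through the standard ethtool stats interface; a sketch of reading them, where the interface name eth0 and the counter values are purely illustrative:

    $ ethtool -S eth0 | grep rx_cache
         rx_cache_reuse: 598693
         rx_cache_alloc: 12
         rx_cache_waive: 0
         rx_cache_busy: 227

A high rx_cache_busy or rx_cache_waive count relative to rx_cache_reuse would suggest the recycling path is being defeated, which is exactly the visibility these counters are meant to provide.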