gve: adopt page pool for DQ RDA mode
For DQ queue format in raw DMA addressing (RDA) mode,
implement page pool recycling of buffers by leveraging
a few helper functions.

DQ QPL mode will continue to use the existing recycling
logic. This is because in QPL mode, the pages come from a
constant set of pages that the driver pre-allocates and
registers with the device.

Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Shailend Chand <shailend@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://patch.msgid.link/20241014202108.1051963-3-pkaligineedi@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Harshitha Ramamurthy authored and Jakub Kicinski committed Oct 16, 2024
1 parent 93c68f1 commit ebdfae0
Showing 4 changed files with 198 additions and 94 deletions.
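
For orientation before the per-file diffs: the patch moves RDA buffer management onto the kernel's generic page_pool API. Below is a minimal, self-contained sketch of that lifecycle -- create a pool, pull a DMA-mapped fragment from it, hand the address to hardware, and return the fragment for recycling. It mirrors the page_pool calls used by the helpers added in gve_buffer_mgmt_dqo.c further down, but every demo_* name is invented; this is illustrative kernel-style code, not part of the patch.

/* Illustrative only: minimal page_pool lifecycle in the style this patch
 * adopts. All demo_* names are invented; only the page_pool calls are real.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

struct demo_ring {
	struct page_pool *pool;
	struct device *dev;
	struct napi_struct *napi;
	u32 desc_cnt;
};

static int demo_pool_init(struct demo_ring *ring, struct net_device *netdev)
{
	struct page_pool_params pp = {
		/* the pool DMA-maps pages and syncs them for the device */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 4 * ring->desc_cnt, /* 4x ring size, as in the patch */
		.dev		= ring->dev,
		.netdev		= netdev,
		.napi		= ring->napi,
		.max_len	= PAGE_SIZE,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	ring->pool = page_pool_create(&pp);
	return IS_ERR(ring->pool) ? PTR_ERR(ring->pool) : 0;
}

/* Post one RX buffer: the fragment comes from the pool already DMA-mapped. */
static int demo_refill_one(struct demo_ring *ring, dma_addr_t *hw_addr)
{
	unsigned int offset, size = 2048; /* requested fragment size */
	struct page *page;

	page = page_pool_alloc(ring->pool, &offset, &size, GFP_ATOMIC);
	if (!page)
		return -ENOMEM;

	*hw_addr = page_pool_get_dma_addr(page) + offset;
	return 0;
}

/* Completion: return the fragment to the pool (recycled, not freed). */
static void demo_complete_one(struct page *page, unsigned int size, bool napi)
{
	page_pool_put_page(page->pp, page, size, napi);
}

static void demo_pool_fini(struct demo_ring *ring)
{
	page_pool_destroy(ring->pool);
}
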
1 change: 1 addition & 0 deletions drivers/net/ethernet/google/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
select PAGE_POOL
help
This driver supports Google Virtual NIC (gVNIC)"

22 changes: 20 additions & 2 deletions drivers/net/ethernet/google/gve/gve.h
@@ -13,6 +13,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include "gve_desc.h"
@@ -60,6 +61,8 @@

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4

#define GVE_FLOW_RULES_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
@@ -102,6 +105,7 @@ struct gve_rx_slot_page_info {
struct page *page;
void *page_address;
u32 page_offset; /* offset to write to in page */
unsigned int buf_size;
int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
u16 pad; /* adjustment for rx padding */
u8 can_flip; /* tracks if the networking stack is using the page */
@@ -273,6 +277,8 @@ struct gve_rx_ring {

/* Address info of the buffers for header-split */
struct gve_header_buf hdr_bufs;

struct page_pool *page_pool;
} dqo;
};

@@ -1176,10 +1182,22 @@ struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
struct gve_rx_buf_state_dqo *buf_state);
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
int gve_alloc_page_dqo(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
void gve_free_to_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
bool allow_direct);
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
void gve_reuse_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
void gve_free_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
struct gve_rx_ring *rx);

/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
180 changes: 131 additions & 49 deletions drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -12,16 +12,6 @@ int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
}

void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
bool free_page)
{
page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
if (free_page)
gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
DMA_FROM_DEVICE);
bs->page_info.page = NULL;
}

struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
{
struct gve_rx_buf_state_dqo *buf_state;
@@ -128,56 +118,28 @@ struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
}

/* For QPL, we cannot allocate any new buffers and must
* wait for the existing ones to be available.
*/
if (rx->dqo.qpl)
return NULL;

/* If there are no free buf states discard an entry from
* `used_buf_states` so it can be used.
*/
if (unlikely(rx->dqo.free_buf_states == -1)) {
buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
if (gve_buf_ref_cnt(buf_state) == 0)
return buf_state;

gve_free_page_dqo(rx->gve, buf_state, true);
gve_free_buf_state(rx, buf_state);
}

return NULL;
}

int gve_alloc_page_dqo(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
struct gve_priv *priv = rx->gve;
u32 idx;

if (!rx->dqo.qpl) {
int err;

err = gve_alloc_page(priv, &priv->pdev->dev,
&buf_state->page_info.page,
&buf_state->addr,
DMA_FROM_DEVICE, GFP_ATOMIC);
if (err)
return err;
} else {
idx = rx->dqo.next_qpl_page_idx;
if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
}
buf_state->page_info.page = rx->dqo.qpl->pages[idx];
buf_state->addr = rx->dqo.qpl->page_buses[idx];
rx->dqo.next_qpl_page_idx++;
idx = rx->dqo.next_qpl_page_idx;
if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
}
buf_state->page_info.page = rx->dqo.qpl->pages[idx];
buf_state->addr = rx->dqo.qpl->page_buses[idx];
rx->dqo.next_qpl_page_idx++;
buf_state->page_info.page_offset = 0;
buf_state->page_info.page_address =
page_address(buf_state->page_info.page);
buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
buf_state->last_single_ref_offset = 0;

/* The page already has 1 ref. */
@@ -187,6 +149,16 @@ int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return 0;
}

void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
{
if (!buf_state->page_info.page)
return;

page_ref_sub(buf_state->page_info.page,
buf_state->page_info.pagecnt_bias - 1);
buf_state->page_info.page = NULL;
}

void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
@@ -228,3 +200,113 @@ void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
rx->dqo.used_buf_states_cnt++;
}

void gve_free_to_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
bool allow_direct)
{
struct page *page = buf_state->page_info.page;

if (!page)
return;

page_pool_put_page(page->pp, page, buf_state->page_info.buf_size,
allow_direct);
buf_state->page_info.page = NULL;
}

static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
struct gve_priv *priv = rx->gve;
struct page *page;

buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
page = page_pool_alloc(rx->dqo.page_pool,
&buf_state->page_info.page_offset,
&buf_state->page_info.buf_size, GFP_ATOMIC);

if (!page)
return -ENOMEM;

buf_state->page_info.page = page;
buf_state->page_info.page_address = page_address(page);
buf_state->addr = page_pool_get_dma_addr(page);

return 0;
}

struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
struct gve_rx_ring *rx)
{
u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
struct page_pool_params pp = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
.dev = &priv->pdev->dev,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
.max_len = PAGE_SIZE,
.dma_dir = DMA_FROM_DEVICE,
};

return page_pool_create(&pp);
}

void gve_free_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
if (rx->dqo.page_pool) {
gve_free_to_page_pool(rx, buf_state, true);
gve_free_buf_state(rx, buf_state);
} else {
gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
buf_state);
}
}

void gve_reuse_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
if (rx->dqo.page_pool) {
buf_state->page_info.page = NULL;
gve_free_buf_state(rx, buf_state);
} else {
gve_dec_pagecnt_bias(&buf_state->page_info);
gve_try_recycle_buf(rx->gve, rx, buf_state);
}
}

int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
struct gve_rx_buf_state_dqo *buf_state;

if (rx->dqo.page_pool) {
buf_state = gve_alloc_buf_state(rx);
if (WARN_ON_ONCE(!buf_state))
return -ENOMEM;

if (gve_alloc_from_page_pool(rx, buf_state))
goto free_buf_state;
} else {
buf_state = gve_get_recycled_buf_state(rx);
if (unlikely(!buf_state)) {
buf_state = gve_alloc_buf_state(rx);
if (unlikely(!buf_state))
return -ENOMEM;

if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
goto free_buf_state;
}
}
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
buf_state->page_info.page_offset);

return 0;

free_buf_state:
gve_free_buf_state(rx, buf_state);
return -ENOMEM;
}
(diff of the fourth changed file is not expanded in this view)
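
The unexpanded fourth file presumably wires these helpers into the RX path. For orientation only, here is a hypothetical caller of the new entry points, based purely on the signatures and bodies added in gve_buffer_mgmt_dqo.c above; the demo_* names and the passed_to_stack flag are invented and do not appear in the patch.

/* Hypothetical caller, not gve code: shows the intended split of duties. */
static int demo_post_buffer(struct gve_rx_ring *rx,
			    struct gve_rx_desc_dqo *desc)
{
	/* Fills desc->buf_id and desc->buf_addr from either the page pool
	 * (RDA) or a pre-registered QPL page, whichever the ring uses.
	 */
	return gve_alloc_buffer(rx, desc);
}

static void demo_complete_buffer(struct gve_rx_ring *rx,
				 struct gve_rx_buf_state_dqo *buf_state,
				 bool passed_to_stack)
{
	if (passed_to_stack)
		/* data was handed up (e.g. attached to an skb): drop only
		 * the ring's bookkeeping; the reference travels with the skb
		 */
		gve_reuse_buffer(rx, buf_state);
	else
		/* buffer never left the driver: give it straight back */
		gve_free_buffer(rx, buf_state);
}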
