gve: Remove dependency on 4k page size.
Prior to this change, gve crashes when attempting to run in kernels with
page sizes other than 4k. This change removes unnecessary references to
PAGE_SIZE and replaces them with more meaningful constants.
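To make the failure mode concrete: the driver's RX layout assumes 2048-byte buffers, two per 4k page, but several sizes were derived from PAGE_SIZE. On a kernel built with 64k pages, PAGE_SIZE / 2 evaluates to 32768, not 2048. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch; the 16k and 64k page sizes are assumed examples):

/* Illustrative only, not driver code: shows how a buffer size derived
 * from PAGE_SIZE diverges from the fixed 2048-byte device layout once
 * the kernel page size is not 4k.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_sizes[] = { 4096, 16384, 65536 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned long half_page = page_sizes[i] / 2;

		printf("PAGE_SIZE %6lu: PAGE_SIZE / 2 = %6lu (%s)\n",
		       page_sizes[i], half_page,
		       half_page == 2048 ? "matches 2048-byte layout"
					 : "breaks 2048-byte layout");
	}
	return 0;
}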

Signed-off-by: Jordan Kimbrough <jrkim@google.com>
Signed-off-by: John Fraker <jfraker@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/r/20231128002648.320892-6-jfraker@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
John Fraker authored and Jakub Kicinski committed Nov 29, 2023
1 parent 513072f · commit da7d4b4
Showing 5 changed files with 11 additions and 10 deletions.
4 changes: 3 additions & 1 deletion drivers/net/ethernet/google/gve/gve.h
@@ -49,7 +49,9 @@
 /* PTYPEs are always 10 bits. */
 #define GVE_NUM_PTYPES 1024
 
-#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+
+#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
 #define GVE_XDP_ACTIONS 5
 
2 changes: 1 addition & 1 deletion drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -519,7 +519,7 @@ static int gve_set_tunable(struct net_device *netdev,
 	case ETHTOOL_RX_COPYBREAK:
 	{
 		u32 max_copybreak = gve_is_gqi(priv) ?
-				(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+				GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
 
 		len = *(u32 *)value;
 		if (len > max_copybreak)
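Behavior on 4k-page kernels is unchanged by this hunk, since PAGE_SIZE / 2 and GVE_DEFAULT_RX_BUFFER_SIZE are both 2048 there; on larger page sizes the copybreak cap now stays within a single 2048-byte receive buffer instead of growing with the page.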
4 changes: 2 additions & 2 deletions drivers/net/ethernet/google/gve/gve_main.c
@@ -1328,7 +1328,7 @@ static int gve_open(struct net_device *dev)
 		/* Hard code this for now. This may be tuned in the future for
 		 * performance.
 		 */
-		priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+		priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
 	}
 	err = gve_create_rings(priv);
 	if (err)
@@ -1664,7 +1664,7 @@ static int verify_xdp_configuration(struct net_device *dev)
 		return -EOPNOTSUPP;
 	}
 
-	if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+	if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
 		netdev_warn(dev, "XDP is not supported for mtu %d.\n",
 			    dev->mtu);
 		return -EOPNOTSUPP;
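Worked through with the standard 14-byte struct ethhdr, and assuming GVE_RX_PAD is 2 bytes (its definition is not shown in this diff), the XDP MTU ceiling becomes 2048 - 14 - 2 = 2032 bytes on every page size, where the old check would have allowed up to PAGE_SIZE / 2 - 16 on larger pages.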
9 changes: 4 additions & 5 deletions drivers/net/ethernet/google/gve/gve_rx.c
@@ -283,7 +283,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
 	/* Allocating half-page buffers allows page-flipping which is faster
 	 * than copying or allocating new pages.
 	 */
-	rx->packet_buffer_size = PAGE_SIZE / 2;
+	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
 	gve_rx_add_to_block(priv, idx);
 
@@ -399,10 +399,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
 
 static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
 {
-	const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+	const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);
 
 	/* "flip" to other packet buffer on this page */
-	page_info->page_offset ^= PAGE_SIZE / 2;
+	page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
 	*(slot_addr) ^= offset;
 }
 
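The flip relies on each page holding exactly two 2048-byte buffers, so XOR-ing with the fixed offset toggles between offsets 0 and 2048. A standalone sketch of that toggle (illustrative, not driver code; the constant's value comes from gve.h above):

/* XOR with the fixed half-buffer offset toggles between the two
 * 2048-byte buffers in a page, independent of the kernel page size.
 */
#include <assert.h>

#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048

int main(void)
{
	unsigned int page_offset = 0;

	page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;	/* 0 -> 2048 */
	assert(page_offset == 2048);

	page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;	/* 2048 -> 0 */
	assert(page_offset == 0);
	return 0;
}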
@@ -507,8 +507,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
 			return NULL;
 
 		gve_dec_pagecnt_bias(copy_page_info);
-		copy_page_info->page_offset += rx->packet_buffer_size;
-		copy_page_info->page_offset &= (PAGE_SIZE - 1);
+		copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
 
 		if (copy_page_info->can_flip) {
 			/* We have used both halves of this copy page, it
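The dropped pair of statements stepped the offset forward by one buffer size and wrapped it with a PAGE_SIZE mask, a scheme that cycles through every buffer-sized slot in the page and, on non-4k pages, visits offsets the two-buffer layout never fills. With exactly two 2048-byte buffers per page, the single XOR is equivalent on 4k kernels and remains correct everywhere else.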
2 changes: 1 addition & 1 deletion drivers/net/ethernet/google/gve/gve_tx.c
@@ -819,7 +819,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 	return 0;
 }
 
-#define GVE_TX_START_THRESH	PAGE_SIZE
+#define GVE_TX_START_THRESH	4096
 
 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
 			     u32 to_do, bool try_to_wake)
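On a 4k-page kernel GVE_TX_START_THRESH already evaluated to 4096, so the literal preserves the existing wake-up threshold there while decoupling it from the page size on all other kernels.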
