gve: Switch to config-aware queue allocation
The new config-aware functions will help achieve the goal of being able
to allocate resources for new queues while there are already active
queues serving traffic.

These new functions work off of arbitrary queue allocation configs
rather than just the currently active config in priv, and they return
the newly allocated resources instead of writing them into priv.

Signed-off-by: Shailend Chand <shailend@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Jeroen de Borst <jeroendb@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20240122182632.1102721-4-shailend@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Shailend Chand authored and Jakub Kicinski committed Jan 24, 2024
1 parent 1dfc2e4 commit f13697c
Showing 9 changed files with 745 additions and 425 deletions.
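
Before the per-file diffs, here is a minimal standalone sketch of the pattern the commit message describes: allocation functions that take an arbitrary config and return the resources they allocate, so a new set of queues can be provisioned while the active ones keep serving traffic. All names in this sketch (queue_cfg, queue_resources, queues_mem_alloc, reconfigure) are hypothetical illustrations, not identifiers from the patch.

#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's queue config and resources. */
struct queue_cfg {
	int num_queues;
};

struct queue_resources {
	void *rings; /* stand-in for ring and QPL memory */
};

/* Config-aware allocation: works off an arbitrary cfg and returns the
 * new resources through *res instead of writing them into a live
 * "priv"-like struct, so it can run while active queues serve traffic.
 */
static int queues_mem_alloc(const struct queue_cfg *cfg,
			    struct queue_resources *res)
{
	res->rings = calloc(cfg->num_queues, 4096);
	return res->rings ? 0 : -ENOMEM;
}

/* Allocate for the new config first; only on success tear down the old
 * queues and swap the freshly allocated resources in.
 */
static int reconfigure(struct queue_cfg *active_cfg,
		       struct queue_resources *active_res,
		       const struct queue_cfg *new_cfg)
{
	struct queue_resources new_res;
	int err = queues_mem_alloc(new_cfg, &new_res);

	if (err)
		return err;		/* active queues were never touched */
	free(active_res->rings);	/* quiesce and free the old queues */
	*active_res = new_res;		/* install the new resources */
	*active_cfg = *new_cfg;
	return 0;
}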
92 changes: 50 additions & 42 deletions drivers/net/ethernet/google/gve/gve.h
@@ -966,14 +966,14 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
 	       priv->queue_format == GVE_DQO_QPL_FORMAT;
 }
 
-/* Returns the number of tx queue page lists
- */
-static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
+/* Returns the number of tx queue page lists */
+static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
+				  int num_xdp_queues,
+				  bool is_qpl)
 {
-	if (!gve_is_qpl(priv))
+	if (!is_qpl)
 		return 0;
 
-	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+	return tx_cfg->num_queues + num_xdp_queues;
 }
 
 /* Returns the number of XDP tx queue page lists
@@ -986,14 +986,13 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
 	return priv->num_xdp_queues;
 }
 
-/* Returns the number of rx queue page lists
- */
-static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
+/* Returns the number of rx queue page lists */
+static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+				  bool is_qpl)
 {
-	if (!gve_is_qpl(priv))
+	if (!is_qpl)
 		return 0;
 
-	return priv->rx_cfg.num_queues;
+	return rx_cfg->num_queues;
 }
 
 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
@@ -1006,59 +1005,59 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
 	return priv->tx_cfg.max_queues + rx_qid;
 }
 
+/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
+static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+{
+	return tx_cfg->max_queues + rx_qid;
+}
+
 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
 {
 	return gve_tx_qpl_id(priv, 0);
 }
 
-static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+/* Returns the index into priv->qpls where the first rx queue's QPL resides */
+static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
 {
-	return gve_rx_qpl_id(priv, 0);
+	return gve_get_rx_qpl_id(tx_cfg, 0);
 }
 
-/* Returns a pointer to the next available tx qpl in the list of qpls
- */
+/* Returns a pointer to the next available tx qpl in the list of qpls */
 static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
+					      int tx_qid)
 {
-	int id = gve_tx_qpl_id(priv, tx_qid);
-
 	/* QPL already in use */
-	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+	if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
 		return NULL;
 
-	set_bit(id, priv->qpl_cfg.qpl_id_map);
-	return &priv->qpls[id];
+	set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
+	return &cfg->qpls[tx_qid];
 }
 
-/* Returns a pointer to the next available rx qpl in the list of qpls
- */
+/* Returns a pointer to the next available rx qpl in the list of qpls */
 static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
+					      int rx_qid)
 {
-	int id = gve_rx_qpl_id(priv, rx_qid);
-
+	int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
 	/* QPL already in use */
-	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+	if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
 		return NULL;
 
-	set_bit(id, priv->qpl_cfg.qpl_id_map);
-	return &priv->qpls[id];
+	set_bit(id, cfg->qpl_cfg->qpl_id_map);
+	return &cfg->qpls[id];
 }
 
-/* Unassigns the qpl with the given id
- */
-static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
+/* Unassigns the qpl with the given id */
+static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
 {
-	clear_bit(id, priv->qpl_cfg.qpl_id_map);
+	clear_bit(id, qpl_cfg->qpl_id_map);
 }
 
-/* Returns the correct dma direction for tx and rx qpls
- */
+/* Returns the correct dma direction for tx and rx qpls */
 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
 						      int id)
 {
-	if (id < gve_rx_start_qpl_id(priv))
+	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
 		return DMA_TO_DEVICE;
 	else
 		return DMA_FROM_DEVICE;
@@ -1103,8 +1102,12 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
 u32 gve_tx_load_event_counter(struct gve_priv *priv,
 			      struct gve_tx_ring *tx);
 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
@@ -1113,7 +1116,12 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
 int gve_rx_poll(struct gve_notify_block *block, int budget);
 bool gve_rx_work_pending(struct gve_rx_ring *rx);
 int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings_gqi(struct gve_priv *priv);
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
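
To make the new gve.h helpers concrete, here is a hedged usage sketch: counting and assigning tx QPLs against an arbitrary allocation config rather than against priv. The helpers and the cfg->qpl_cfg and cfg->qcfg_tx fields come from the diff above; the cfg->qcfg field on the tx config is assumed by analogy with cfg->qcfg_tx on the rx side, and the function and loop are this sketch's own scaffolding, not patch code.

/* Sketch only: cfg->qcfg is an assumed field; everything else is taken
 * from the declarations shown in the diff above.
 */
static int sketch_assign_tx_qpls(struct gve_priv *priv,
				 struct gve_tx_alloc_rings_cfg *cfg)
{
	int num_qpls, i;

	/* Count QPLs for an arbitrary config, not the active one in priv. */
	num_qpls = gve_num_tx_qpls(cfg->qcfg, priv->num_xdp_queues,
				   gve_is_qpl(priv));

	for (i = 0; i < num_qpls; i++) {
		/* NULL means the bit was already set in qpl_id_map,
		 * i.e. some queue already owns this QPL.
		 */
		struct gve_queue_page_list *qpl = gve_assign_tx_qpl(cfg, i);

		if (!qpl)
			return -EBUSY;
	}
	return 0;
}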
16 changes: 12 additions & 4 deletions drivers/net/ethernet/google/gve/gve_dqo.h
@@ -38,10 +38,18 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
 					 netdev_features_t features);
 bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
 int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_tx_free_rings_dqo(struct gve_priv *priv);
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx);
 int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
 			  struct napi_struct *napi);
 void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
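
The consistent split of each queue operation into alloc/free (config-driven, safe while traffic flows) and start/stop (per-ring, touching live state) suggests the intended calling sequence: allocate for the new config while the old rings still run, then stop, free, and start. The sketch below shows one plausible shape of such a caller; it is not the patch's gve_main.c code (which is among the files not rendered here), and the cfg->qcfg->num_queues access is an assumption.

/* Hedged sketch of a caller of the split DQO tx API; error unwinding is
 * elided and cfg->qcfg->num_queues is an assumed field access.
 */
static int sketch_reconfigure_tx_dqo(struct gve_priv *priv,
				     struct gve_tx_alloc_rings_cfg *old_cfg,
				     struct gve_tx_alloc_rings_cfg *new_cfg)
{
	int err, i;

	/* 1) Allocate rings for the new config; old rings keep running. */
	err = gve_tx_alloc_rings_dqo(priv, new_cfg);
	if (err)
		return err;	/* live traffic was never disturbed */

	/* 2) Quiesce the currently active rings. */
	for (i = 0; i < old_cfg->qcfg->num_queues; i++)
		gve_tx_stop_ring_dqo(priv, i);

	/* 3) Release the old resources ... */
	gve_tx_free_rings_dqo(priv, old_cfg);

	/* 4) ... and bring up the rings of the new config. */
	for (i = 0; i < new_cfg->qcfg->num_queues; i++)
		gve_tx_start_ring_dqo(priv, i);

	return 0;
}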
(Diffs for the remaining 7 changed files are not rendered here.)
