Merge branch 'intel-wired-lan-driver-updates-2024-05-29-ice-igc'
Jacob Keller says:

====================
Intel Wired LAN Driver Updates 2024-05-29 (ice, igc)

This series includes fixes for the ice driver as well as a fix for the igc
driver.

Jacob fixes two issues in the ice driver with reading the NVM for providing
firmware data via devlink info. First, fix an off-by-one error when reading
the Preserved Fields Area, resolving an infinite loop triggered on some
NVMs which lack certain data (sketched just after this message). Second, fix
the reading of the NVM Shadow RAM on newer E830 and E825-C devices, which
have a variable-sized CSS header, rather than assuming this header is always
the same fixed size as on the E810 devices.

Larysa fixes three issues with the ice driver XDP logic that could occur if
the number of queues is changed after enabling an XDP program. First, the
af_xdp_zc_qps bitmap is removed and replaced by simpler logic to track
whether queues are in zero-copy mode (condensed just after this message).
Second, the reset and .ndo_bpf flows are distinguished to avoid potential
races with a PF reset occurring simultaneously with the .ndo_bpf callback
from userspace. Third, the logic for mapping XDP queues to vectors is fixed
so that XDP state is restored for XDP queues after a reconfiguration.

Sasha fixes reporting of Energy Efficient Ethernet support via ethtool in
the igc driver.

v1: https://lore.kernel.org/r/20240530-net-2024-05-30-intel-net-fixes-v1-0-8b11c8c9bff8@intel.com
====================
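
The NVM hunks are not among the diffs rendered below, so the following is only a minimal sketch of the off-by-one class of bug described above. All names are hypothetical (read_nvm_word() is not the actual ice_nvm.c accessor), and the PFA layout is assumed from the description:

/* Illustrative sketch only; hypothetical names, not the ice_nvm.c code.
 * A Preserved Fields Area of @pfa_len words starting at word offset
 * @pfa_ptr holds a list of TLVs; assume the length count covers its own
 * header word plus one trailing word, so the last valid TLV header sits
 * below pfa_ptr + pfa_len - 1. Scanning one word too far misreads the
 * trailing word as a TLV header, and on NVMs that lack the requested TLV
 * the scan can then run past the end or loop forever.
 */
static int find_pfa_tlv(u16 pfa_ptr, u16 pfa_len, u16 wanted, u16 *offset)
{
	u16 next_tlv = pfa_ptr + 1;
	u16 max_tlv = pfa_ptr + pfa_len - 1;	/* fix: was pfa_ptr + pfa_len */

	while (next_tlv < max_tlv) {
		u16 type = read_nvm_word(next_tlv);
		u16 len = read_nvm_word(next_tlv + 1);

		if (type == wanted) {
			*offset = next_tlv;
			return 0;
		}
		next_tlv += 2 + len;	/* skip type, length, and value words */
	}

	return -ENOENT;
}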
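
The first of the XDP fixes is easiest to see in the ice.h hunks below: instead of a separate bitmap that had to be kept in sync whenever the queue count changed, zero-copy state is now derived from the XSK pool registry itself. Both snippets are condensed from the diff:

/* before: test a VSI-private bitmap that tracked ZC queues */
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
	return NULL;
return xsk_get_pool_from_qid(vsi->netdev, qid);

/* after: ask the pool registry directly via the new helper */
return ice_get_xp_from_qid(vsi, qid);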

Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-0-e3563aa89b0c@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed Jun 6, 2024
2 parents 886bf91 + 7d67d11 commit 5899c88
Showing 9 changed files with 244 additions and 104 deletions.
44 changes: 31 additions & 13 deletions drivers/net/ethernet/intel/ice/ice.h
@@ -409,7 +409,6 @@ struct ice_vsi {
 	struct ice_tc_cfg tc_cfg;
 	struct bpf_prog *xdp_prog;
 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
-	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
 	u16 num_xdp_txq;		 /* Used XDP queues */
 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
 
@@ -746,6 +745,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
 }
 
+/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+							u16 qid)
+{
+	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+	if (!ice_is_xdp_ena_vsi(vsi))
+		return NULL;
+
+	return (pool && pool->dev) ? pool : NULL;
+}
+
 /**
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 	struct ice_vsi *vsi = ring->vsi;
 	u16 qid = ring->q_index;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-		return NULL;
-
-	return xsk_get_pool_from_qid(vsi->netdev, qid);
+	return ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 	if (!ring)
 		return;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
-		ring->xsk_pool = NULL;
-		return;
-	}
-
-	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+	ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi);
 int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg_lan(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+	ICE_XDP_CFG_FULL,	/* Fully apply new config in .ndo_bpf() */
+	ICE_XDP_CFG_PART,	/* Save/use part of config in VSI rebuild */
+};
+
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+			  enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	     u32 flags);
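The new enum ice_xdp_cfg is what separates the two flows named in the cover letter. Condensed from the ice_lib.c and ice_main.c hunks below, the call sites pair up as:

/* VSI rebuild path (ice_vsi_cfg_def() / ice_vsi_decfg()): only part of
 * the XDP configuration is saved and reapplied around a reset
 */
ice_prepare_xdp_rings(vsi, vsi->xdp_prog, ICE_XDP_CFG_PART);
ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);

/* .ndo_bpf path (ice_xdp_setup_prog()): full configuration or teardown */
ice_prepare_xdp_rings(vsi, prog, ICE_XDP_CFG_FULL);
ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);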
3 changes: 3 additions & 0 deletions drivers/net/ethernet/intel/ice/ice_base.c
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		}
 		rx_rings_rem -= rx_rings_per_v;
 	}
+
+	if (ice_is_xdp_ena_vsi(vsi))
+		ice_map_xdp_rings(vsi);
 }
 
 /**
27 changes: 10 additions & 17 deletions drivers/net/ethernet/intel/ice/ice_lib.c
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	if (!vsi->q_vectors)
 		goto err_vectors;
 
-	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
-	if (!vsi->af_xdp_zc_qps)
-		goto err_zc_qps;
-
 	return 0;
 
-err_zc_qps:
-	devm_kfree(dev, vsi->q_vectors);
 err_vectors:
 	devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
 	dev = ice_pf_to_dev(pf);
 
-	bitmap_free(vsi->af_xdp_zc_qps);
-	vsi->af_xdp_zc_qps = NULL;
 	/* free the ring and vector containers */
 	devm_kfree(dev, vsi->q_vectors);
 	vsi->q_vectors = NULL;
@@ -2282,22 +2274,23 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 	if (ret)
 		goto unroll_vector_base;
 
-	ice_vsi_map_rings_to_vectors(vsi);
-
-	/* Associate q_vector rings to napi */
-	ice_vsi_set_napi_queues(vsi);
-
-	vsi->stat_offsets_loaded = false;
-
 	if (ice_is_xdp_ena_vsi(vsi)) {
 		ret = ice_vsi_determine_xdp_res(vsi);
 		if (ret)
 			goto unroll_vector_base;
-		ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+		ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+					    ICE_XDP_CFG_PART);
 		if (ret)
 			goto unroll_vector_base;
 	}
 
+	ice_vsi_map_rings_to_vectors(vsi);
+
+	/* Associate q_vector rings to napi */
+	ice_vsi_set_napi_queues(vsi);
+
+	vsi->stat_offsets_loaded = false;
+
 	/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
 	if (vsi->type != ICE_VSI_CTRL)
 		/* Do not exit if configuring RSS had an issue, at
@@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
 	/* return value check can be skipped here, it always returns
 	 * 0 if reset is in progress
 	 */
-	ice_destroy_xdp_rings(vsi);
+	ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
 
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_q_vectors(vsi);
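Note the ordering change in ice_vsi_cfg_def() above: XDP rings are now prepared before rings are mapped to vectors, so that ice_vsi_map_rings_to_vectors(), which per the ice_base.c hunk now finishes by calling ice_map_xdp_rings(), already sees the XDP rings during a rebuild. Condensed from the hunk above:

	if (ice_is_xdp_ena_vsi(vsi)) {
		ret = ice_vsi_determine_xdp_res(vsi);
		if (ret)
			goto unroll_vector_base;
		ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
					    ICE_XDP_CFG_PART);
		if (ret)
			goto unroll_vector_base;
	}

	/* only now map all rings, XDP rings included, to interrupt vectors */
	ice_vsi_map_rings_to_vectors(vsi);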
118 changes: 69 additions & 49 deletions drivers/net/ethernet/intel/ice/ice_main.c
@@ -2707,17 +2707,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
 		bpf_prog_put(old_prog);
 }
 
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+	struct ice_q_vector *q_vector;
+	struct ice_tx_ring *ring;
+
+	if (static_key_enabled(&ice_xdp_locking_key))
+		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+	q_vector = vsi->rx_rings[qid]->q_vector;
+	ice_for_each_tx_ring(ring, q_vector->tx)
+		if (ice_ring_is_xdp(ring))
+			return ring;
+
+	return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+	int xdp_rings_rem = vsi->num_xdp_txq;
+	int v_idx, q_idx;
+
+	/* follow the logic from ice_vsi_map_rings_to_vectors */
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		int xdp_rings_per_v, q_id, q_base;
+
+		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+					       vsi->num_q_vectors - v_idx);
+		q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+			xdp_ring->q_vector = q_vector;
+			xdp_ring->next = q_vector->tx.tx_ring;
+			q_vector->tx.tx_ring = xdp_ring;
+		}
+		xdp_rings_rem -= xdp_rings_per_v;
+	}
+
+	ice_for_each_rxq(vsi, q_idx) {
+		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+								       q_idx);
+		ice_tx_xsk_pool(vsi, q_idx);
+	}
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
  * @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
  *
  * Return 0 on success and negative value on error
  */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+			  enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int xdp_rings_rem = vsi->num_xdp_txq;
 	struct ice_pf *pf = vsi->back;
 	struct ice_qs_cfg xdp_qs_cfg = {
 		.qs_mutex = &pf->avail_q_mutex,
@@ -2730,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 		.mapping_mode = ICE_VSI_MAP_CONTIG
 	};
 	struct device *dev;
-	int i, v_idx;
-	int status;
+	int status, i;
 
 	dev = ice_pf_to_dev(pf);
 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2750,49 +2804,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 	if (ice_xdp_alloc_setup_rings(vsi))
 		goto clear_xdp_rings;
 
-	/* follow the logic from ice_vsi_map_rings_to_vectors */
-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		int xdp_rings_per_v, q_id, q_base;
-
-		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-					       vsi->num_q_vectors - v_idx);
-		q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-			xdp_ring->q_vector = q_vector;
-			xdp_ring->next = q_vector->tx.tx_ring;
-			q_vector->tx.tx_ring = xdp_ring;
-		}
-		xdp_rings_rem -= xdp_rings_per_v;
-	}
-
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key)) {
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		} else {
-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-			struct ice_tx_ring *ring;
-
-			ice_for_each_tx_ring(ring, q_vector->tx) {
-				if (ice_ring_is_xdp(ring)) {
-					vsi->rx_rings[i]->xdp_ring = ring;
-					break;
-				}
-			}
-		}
-		ice_tx_xsk_pool(vsi, i);
-	}
-
 	/* omit the scheduler update if in reset path; XDP queues will be
 	 * taken into account at the end of ice_vsi_rebuild, where
 	 * ice_cfg_vsi_lan is being called
 	 */
-	if (ice_is_reset_in_progress(pf->state))
+	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
+	ice_map_xdp_rings(vsi);
+
 	/* tell the Tx scheduler that right now we have
 	 * additional queues
 	 */
@@ -2842,22 +2862,21 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 /**
  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
  * @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
  *
  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
  * resources
  */
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
 	int i, v_idx;
 
 	/* q_vectors are freed in reset path so there's no point in detaching
-	 * rings; in case of rebuild being triggered not from reset bits
-	 * in pf->state won't be set, so additionally check first q_vector
-	 * against NULL
+	 * rings
 	 */
-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+	if (cfg_type == ICE_XDP_CFG_PART)
 		goto free_qmap;
 
 	ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2917,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
 	if (static_key_enabled(&ice_xdp_locking_key))
 		static_branch_dec(&ice_xdp_locking_key);
 
-	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
 	ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3028,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 		if (xdp_ring_err) {
 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
 		} else {
-			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+							     ICE_XDP_CFG_FULL);
 			if (xdp_ring_err)
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
 		}
@@ -3020,7 +3040,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
 		xdp_features_clear_redirect_target(vsi->netdev);
-		xdp_ring_err = ice_destroy_xdp_rings(vsi);
+		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
 		if (xdp_ring_err)
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
 		/* reallocate Rx queues that were used for zero-copy */
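A worked example of the round-robin split in ice_map_xdp_rings() above: with num_xdp_txq = 5 and num_q_vectors = 3, vector 0 takes DIV_ROUND_UP(5, 3) = 2 rings (q0, q1), vector 1 takes DIV_ROUND_UP(3, 2) = 2 rings (q2, q3), and vector 2 takes DIV_ROUND_UP(1, 1) = 1 ring (q4), so every vector ends up within one ring of an even share.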
(Diffs for the remaining five changed files did not render.)
