Skip to content

Commit

Permalink
Merge branch 'introduce-define_flex-macro'
Browse files Browse the repository at this point in the history
Przemek Kitszel says:

====================
introduce DEFINE_FLEX() macro

Add DEFINE_FLEX() macro, that helps on-stack allocation of structures
with trailing flex array member.
Expose the __struct_size() macro, which reads the size of the data allocated
by DEFINE_FLEX().

Accompany the introduction of the new macros with actual usage,
in the ice driver — hence targeting the netdev tree.

Obvious benefits include simpler resulting code, less heap usage,
less error checking. Less obvious is the fact that the compiler has
more room to optimize, and as a whole, even with more data on the stack,
we end up with an overall better (smaller) report from bloat-o-meter:
add/remove: 8/6 grow/shrink: 7/18 up/down: 2211/-2270 (-59)
(individual results in each patch).
====================

Link: https://lore.kernel.org/r/20230912115937.1645707-1-przemyslaw.kitszel@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
  • Loading branch information
Jakub Kicinski committed Oct 3, 2023
2 parents e643597 + e268b97 commit 0e0c52d
Show file tree
Hide file tree
Showing 11 changed files with 130 additions and 218 deletions.
20 changes: 4 additions & 16 deletions drivers/net/ethernet/intel/ice/ice_common.c
Original file line number Diff line number Diff line change
Expand Up @@ -4790,11 +4790,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
struct ice_aqc_dis_txq_item *qg_list;
DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
struct ice_hw *hw;
u16 i, buf_size;

if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;
Expand All @@ -4812,11 +4812,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
return -EIO;
}

buf_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(buf_size, GFP_KERNEL);
if (!qg_list)
return -ENOMEM;

mutex_lock(&pi->sched_lock);

for (i = 0; i < num_queues; i++) {
Expand Down Expand Up @@ -4849,7 +4844,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
q_ctx->q_teid = ICE_INVAL_TEID;
}
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
return status;
}

Expand Down Expand Up @@ -5018,22 +5012,17 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
u16 qg_size;
int i;

if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;

hw = pi->hw;

qg_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(qg_size, GFP_KERNEL);
if (!qg_list)
return -ENOMEM;

mutex_lock(&pi->sched_lock);

for (i = 0; i < count; i++) {
Expand All @@ -5058,7 +5047,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}

mutex_unlock(&pi->sched_lock);
kfree(qg_list);
return status;
}

Expand Down
39 changes: 11 additions & 28 deletions drivers/net/ethernet/intel/ice/ice_ddp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1560,21 +1560,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_aqc_get_pkg_info_resp *pkg_info;
u16 size;
DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
ICE_PKG_CNT);
u16 size = __struct_size(pkg_info);
u32 i;

size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
return ICE_DDP_PKG_ERR;

if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
state = ICE_DDP_PKG_ERR;
goto init_pkg_free_alloc;
}

for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
Expand Down Expand Up @@ -1604,10 +1597,7 @@ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
pkg_info->pkg_info[i].name, flags);
}

init_pkg_free_alloc:
kfree(pkg_info);

return state;
return ICE_DDP_PKG_SUCCESS;
}

/**
Expand All @@ -1622,9 +1612,10 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
struct ice_aqc_get_pkg_info_resp *pkg;
DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
ICE_PKG_CNT);
u16 size = __struct_size(pkg);
enum ice_ddp_state state;
u16 size;
u32 i;

/* Check package version compatibility */
Expand All @@ -1643,15 +1634,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
}

/* Check if FW is compatible with the OS package */
size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = kzalloc(size, GFP_KERNEL);
if (!pkg)
return ICE_DDP_PKG_ERR;

if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
state = ICE_DDP_PKG_LOAD_ERROR;
goto fw_ddp_compat_free_alloc;
}
if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
return ICE_DDP_PKG_LOAD_ERROR;

for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
Expand All @@ -1668,8 +1652,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
/* done processing NVM package so break */
break;
}
fw_ddp_compat_free_alloc:
kfree(pkg);

return state;
}

Expand Down
48 changes: 12 additions & 36 deletions drivers/net/ethernet/intel/ice/ice_lag.c
Original file line number Diff line number Diff line change
Expand Up @@ -430,10 +430,11 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
u16 numq, valq, buf_size, num_moved, qbuf_size;
DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
struct ice_hw *new_hw = NULL;
__le32 teid, parent_teid;
Expand Down Expand Up @@ -505,26 +506,17 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
goto resume_traffic;

/* Move Vf's VSI node for this TC to newport's scheduler tree */
buf_size = struct_size(buf, teid, 1);
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
dev_warn(dev, "Failure to alloc memory for VF node failover\n");
goto resume_traffic;
}

buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;

if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
NULL))
if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for failover\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

kfree(buf);
goto resume_traffic;

qbuf_err:
Expand Down Expand Up @@ -755,10 +747,11 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
u16 numq, valq, buf_size, num_moved, qbuf_size;
DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
Expand Down Expand Up @@ -820,26 +813,17 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
goto resume_reclaim;

/* Move node to new parent */
buf_size = struct_size(buf, teid, 1);
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
dev_warn(dev, "Failure to alloc memory for VF node failover\n");
goto resume_reclaim;
}

buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;

if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
NULL))
if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

kfree(buf);
goto resume_reclaim;

reclaim_qerr:
Expand Down Expand Up @@ -1792,10 +1776,11 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
u16 numq, valq, buf_size, num_moved, qbuf_size;
DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
Expand Down Expand Up @@ -1853,26 +1838,17 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
goto resume_sync;

/* Move node to new parent */
buf_size = struct_size(buf, teid, 1);
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
goto resume_sync;
}

buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;

if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
NULL))
if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

kfree(buf);
goto resume_sync;

sync_qerr:
Expand Down
23 changes: 5 additions & 18 deletions drivers/net/ethernet/intel/ice/ice_lib.c
Original file line number Diff line number Diff line change
Expand Up @@ -1832,21 +1832,14 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)

/**
 * ice_vsi_cfg_single_txq - configure a single Tx queue of a VSI
 * @vsi: the VSI whose queue is being configured
 * @tx_rings: Tx ring array indexed by queue number
 * @q_idx: index of the queue to configure
 *
 * The AQ buffer for one queue group lives on the stack (DEFINE_FLEX),
 * so there is no heap allocation and no allocation-failure path.
 *
 * Return: 0 on success, -EINVAL on invalid arguments, otherwise the
 * error code propagated from ice_vsi_cfg_txq().
 */
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
	/* on-stack flex-array buffer sized for exactly one Tx queue */
	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);

	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
		return -EINVAL;

	qg_buf->num_txqs = 1;

	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}

/**
Expand Down Expand Up @@ -1888,24 +1881,18 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
/**
 * ice_vsi_cfg_txqs - configure the first @count Tx queues of a VSI
 * @vsi: the VSI being configured
 * @rings: Tx rings, one per queue to configure
 * @count: number of queues to configure
 *
 * A single on-stack AQ buffer (DEFINE_FLEX) is reused for every queue;
 * configuration stops at the first failure.
 *
 * Return: 0 on success, or the first error returned by
 * ice_vsi_cfg_txq().
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
	/* on-stack flex-array buffer sized for exactly one Tx queue */
	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
	int err = 0;
	u16 q_idx;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < count; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			break;
	}

	return err;
}

Expand Down
Loading

0 comments on commit 0e0c52d

Please sign in to comment.