ice: Rework queue management code for reuse

This patch reworks the queue management code to allow for reuse with the
XDP feature (to be added in a future patch).

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Anirudh Venkataramanan authored and Jeff Kirsher committed Jan 15, 2019
1 parent ab4ab73 commit 03f7a98
Showing 5 changed files with 159 additions and 123 deletions.
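
The core of the rework is the new struct ice_qs_cfg (added to ice.h below): a queue-assignment request is reduced to a PF-wide availability bitmap, a queue count, and a destination map inside the VSI, so one set of helpers can serve LAN Tx, Rx, and, later, XDP rings. As a rough sketch of the intended reuse — xdp_qs_cfg and vsi->num_xdp_txq are hypothetical names borrowed from the XDP work this patch prepares for, not part of this commit — a future caller could map XDP queues into the upper region of vsi->txq_map:

	/* Hypothetical future caller (not in this patch): describe an XDP
	 * queue set and reuse __ice_vsi_get_qs(). vsi_map_offset places the
	 * XDP entries after the regular LAN Tx queues in vsi->txq_map.
	 */
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = ICE_MAX_TXQS,
		.q_count = vsi->num_xdp_txq,	/* assumed field */
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = vsi->alloc_txq, /* land after LAN Tx queues */
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};

	if (__ice_vsi_get_qs(&xdp_qs_cfg))
		return -ENOMEM;

Note how vsi_map_offset is the only knob needed to keep a second queue set from colliding with the first inside the same txq_map array.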
11 changes: 11 additions & 0 deletions drivers/net/ethernet/intel/ice/ice.h
@@ -129,6 +129,17 @@ struct ice_res_tracker {
 	u16 list[1];
 };
 
+struct ice_qs_cfg {
+	struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
+	unsigned long *pf_map;
+	unsigned long pf_map_size;
+	unsigned int q_count;
+	unsigned int scatter_count;
+	u16 *vsi_map;
+	u16 vsi_map_offset;
+	u8 mapping_mode;
+};
+
 struct ice_sw {
 	struct ice_pf *pf;
 	u16 sw_id; /* switch ID for this switch */
236 changes: 127 additions & 109 deletions drivers/net/ethernet/intel/ice/ice_lib.c
@@ -514,109 +514,88 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
 }
 
 /**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
  *
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
  */
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
 {
-	struct ice_pf *pf = vsi->back;
-	int offset, ret = 0;
+	int offset, i;
 
-	mutex_lock(&pf->avail_q_mutex);
-	/* look for contiguous block of queues for Tx */
-	offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
-					    0, vsi->alloc_txq, 0);
-	if (offset < ICE_MAX_TXQS) {
-		int i;
-
-		bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
-		for (i = 0; i < vsi->alloc_txq; i++)
-			vsi->txq_map[i] = i + offset;
-	} else {
-		ret = -ENOMEM;
-		vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+	mutex_lock(qs_cfg->qs_mutex);
+	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+					    0, qs_cfg->q_count, 0);
+	if (offset >= qs_cfg->pf_map_size) {
+		mutex_unlock(qs_cfg->qs_mutex);
+		return -ENOMEM;
 	}
 
-	/* look for contiguous block of queues for Rx */
-	offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
-					    0, vsi->alloc_rxq, 0);
-	if (offset < ICE_MAX_RXQS) {
-		int i;
-
-		bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
-		for (i = 0; i < vsi->alloc_rxq; i++)
-			vsi->rxq_map[i] = i + offset;
-	} else {
-		ret = -ENOMEM;
-		vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
-	}
-	mutex_unlock(&pf->avail_q_mutex);
+	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+	for (i = 0; i < qs_cfg->q_count; i++)
+		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+	mutex_unlock(qs_cfg->qs_mutex);
 
-	return ret;
+	return 0;
 }
 
 /**
- * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
  *
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
  */
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
 {
-	struct ice_pf *pf = vsi->back;
 	int i, index = 0;
 
-	mutex_lock(&pf->avail_q_mutex);
-
-	if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-		for (i = 0; i < vsi->alloc_txq; i++) {
-			index = find_next_zero_bit(pf->avail_txqs,
-						   ICE_MAX_TXQS, index);
-			if (index < ICE_MAX_TXQS) {
-				set_bit(index, pf->avail_txqs);
-				vsi->txq_map[i] = index;
-			} else {
-				goto err_scatter_tx;
-			}
-		}
-	}
-
-	if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-		for (i = 0; i < vsi->alloc_rxq; i++) {
-			index = find_next_zero_bit(pf->avail_rxqs,
-						   ICE_MAX_RXQS, index);
-			if (index < ICE_MAX_RXQS) {
-				set_bit(index, pf->avail_rxqs);
-				vsi->rxq_map[i] = index;
-			} else {
-				goto err_scatter_rx;
-			}
-		}
+	mutex_lock(qs_cfg->qs_mutex);
+	for (i = 0; i < qs_cfg->q_count; i++) {
+		index = find_next_zero_bit(qs_cfg->pf_map,
+					   qs_cfg->pf_map_size, index);
+		if (index >= qs_cfg->pf_map_size)
+			goto err_scatter;
+		set_bit(index, qs_cfg->pf_map);
+		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
 	}
+	mutex_unlock(qs_cfg->qs_mutex);
 
-	mutex_unlock(&pf->avail_q_mutex);
 	return 0;
 
-err_scatter_rx:
-	/* unflag any queues we have grabbed (i is failed position) */
+err_scatter:
 	for (index = 0; index < i; index++) {
-		clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
-		vsi->rxq_map[index] = 0;
-	}
-	i = vsi->alloc_txq;
-err_scatter_tx:
-	/* i is either position of failed attempt or vsi->alloc_txq */
-	for (index = 0; index < i; index++) {
-		clear_bit(vsi->txq_map[index], pf->avail_txqs);
-		vsi->txq_map[index] = 0;
+		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
 	}
+	mutex_unlock(qs_cfg->qs_mutex);
 
-	mutex_unlock(&pf->avail_q_mutex);
 	return -ENOMEM;
 }
 
+/**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * This is an internal function for assigning queues from the PF to VSI and
+ * initially tries to find contiguous space. If it is not successful to find
+ * contiguous space, then it tries with the scatter approach.
+ *
+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
+ */
+static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+	int ret = 0;
+
+	ret = __ice_vsi_get_qs_contig(qs_cfg);
+	if (ret) {
+		/* contig failed, so try with scatter approach */
+		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+					qs_cfg->scatter_count);
+		ret = __ice_vsi_get_qs_sc(qs_cfg);
+	}
+	return ret;
+}
+
 /**
  * ice_vsi_get_qs - Assign queues from PF to VSI
  * @vsi: the VSI to assign queues to
@@ -625,25 +604,35 @@ static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
  */
 static int ice_vsi_get_qs(struct ice_vsi *vsi)
 {
+	struct ice_pf *pf = vsi->back;
+	struct ice_qs_cfg tx_qs_cfg = {
+		.qs_mutex = &pf->avail_q_mutex,
+		.pf_map = pf->avail_txqs,
+		.pf_map_size = ICE_MAX_TXQS,
+		.q_count = vsi->alloc_txq,
+		.scatter_count = ICE_MAX_SCATTER_TXQS,
+		.vsi_map = vsi->txq_map,
+		.vsi_map_offset = 0,
+		.mapping_mode = vsi->tx_mapping_mode
+	};
+	struct ice_qs_cfg rx_qs_cfg = {
+		.qs_mutex = &pf->avail_q_mutex,
+		.pf_map = pf->avail_rxqs,
+		.pf_map_size = ICE_MAX_RXQS,
+		.q_count = vsi->alloc_rxq,
+		.scatter_count = ICE_MAX_SCATTER_RXQS,
+		.vsi_map = vsi->rxq_map,
+		.vsi_map_offset = 0,
+		.mapping_mode = vsi->rx_mapping_mode
+	};
 	int ret = 0;
 
 	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
 	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
 
-	/* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
-	 * modes individually to scatter if assigning contiguous queues
-	 * to Rx or Tx fails
-	 */
-	ret = ice_vsi_get_qs_contig(vsi);
-	if (ret < 0) {
-		if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
-			vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
-					       ICE_MAX_SCATTER_TXQS);
-		if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
-			vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
-					       ICE_MAX_SCATTER_RXQS);
-		ret = ice_vsi_get_qs_scatter(vsi);
-	}
+	ret = __ice_vsi_get_qs(&tx_qs_cfg);
+	if (!ret)
+		ret = __ice_vsi_get_qs(&rx_qs_cfg);
 
 	return ret;
 }
@@ -1614,19 +1603,22 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 /**
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @offset: offset within vsi->txq_map
  *
  * Return 0 on success and a negative value on error
  * Configure the Tx VSI for operation.
  */
-int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
 {
 	struct ice_aqc_add_tx_qgrp *qg_buf;
 	struct ice_aqc_add_txqs_perq *txq;
 	struct ice_pf *pf = vsi->back;
 	u8 num_q_grps, q_idx = 0;
 	enum ice_status status;
 	u16 buf_len, i, pf_q;
-	int err = 0, tc = 0;
+	int err = 0, tc;
 
 	buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
 	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
@@ -1644,9 +1636,8 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
 			struct ice_tlan_ctx tlan_ctx = { 0 };
 
-			pf_q = vsi->txq_map[q_idx];
-			ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
-					 pf_q);
+			pf_q = vsi->txq_map[q_idx + offset];
+			ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
 			/* copy context contents into the qg_buf */
 			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
 			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
@@ -1655,7 +1646,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 			/* init queue specific tail reg. It is referred as
 			 * transmit comm scheduler queue doorbell.
 			 */
-			vsi->tx_rings[q_idx]->tail =
+			rings[q_idx]->tail =
 				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
 			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
 						 num_q_grps, qg_buf, buf_len,
@@ -1674,7 +1665,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 			 */
 			txq = &qg_buf->txqs[0];
 			if (pf_q == le16_to_cpu(txq->txq_id))
-				vsi->tx_rings[q_idx]->txq_teid =
+				rings[q_idx]->txq_teid =
 					le32_to_cpu(txq->q_teid);
 
 			q_idx++;
@@ -1685,6 +1676,18 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 	return err;
 }
 
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
+}
+
 /**
  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
  * @intrl: interrupt rate limit in usecs
@@ -1897,9 +1900,12 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
  * @vsi: the VSI being configured
  * @rst_src: reset source
  * @rel_vmvf_num: Relative id of VF/VM
+ * @rings: Tx ring array to be stopped
+ * @offset: offset within vsi->txq_map
  */
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-			  u16 rel_vmvf_num)
+static int
+ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+		      u16 rel_vmvf_num, struct ice_ring **rings, int offset)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
@@ -1927,27 +1933,26 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 	ice_for_each_txq(vsi, i) {
 		u16 v_idx;
 
-		if (!vsi->tx_rings || !vsi->tx_rings[i] ||
-		    !vsi->tx_rings[i]->q_vector) {
+		if (!rings || !rings[i] || !rings[i]->q_vector) {
 			err = -EINVAL;
 			goto err_out;
 		}
 
-		q_ids[i] = vsi->txq_map[i];
-		q_teids[i] = vsi->tx_rings[i]->txq_teid;
+		q_ids[i] = vsi->txq_map[i + offset];
+		q_teids[i] = rings[i]->txq_teid;
 
 		/* clear cause_ena bit for disabled queues */
-		val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+		val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
 		val &= ~QINT_TQCTL_CAUSE_ENA_M;
-		wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+		wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
 
 		/* software is expected to wait for 100 ns */
 		ndelay(100);
 
 		/* trigger a software interrupt for the vector associated to
 		 * the queue to schedule NAPI handler
 		 */
-		v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+		v_idx = rings[i]->q_vector->v_idx;
 		wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
 		     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
 	}
@@ -1976,6 +1981,19 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 	return err;
 }
 
+/**
+ * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative id of VF/VM
+ */
+int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
+			      enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+{
+	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
+				     0);
+}
+
 /**
  * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
  * @vsi: VSI to enable or disable VLAN pruning on
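With ice_vsi_cfg_txqs() and ice_vsi_stop_tx_rings() now static and parameterized by a ring array plus an offset into vsi->txq_map, exposing the same machinery for another ring set is a one-line wrapper, exactly as ice_vsi_cfg_lan_txqs() does above for vsi->tx_rings. A hypothetical XDP counterpart (vsi->xdp_rings is an assumed field from the follow-up XDP patch, not this commit) might look like:

	/* Hypothetical sketch of a future XDP wrapper; vsi->xdp_rings is
	 * an assumed field. Passing vsi->alloc_txq as the offset selects
	 * the upper region of vsi->txq_map reserved for XDP queues.
	 */
	int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
	{
		return ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->alloc_txq);
	}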
5 changes: 3 additions & 2 deletions drivers/net/ethernet/intel/ice/ice_lib.h
@@ -15,7 +15,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
 
 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
 
-int ice_vsi_cfg_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
 
 void ice_vsi_cfg_msix(struct ice_vsi *vsi);
 
@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
 
 int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
 
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+int
+ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 			  u16 rel_vmvf_num);
 
 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);