ice: Fix issue reconfiguring VF queues
When a VF requests a change in its number of queues, we need to update
the LAN Tx queue with the correct number of VF queue pairs and
re-allocate VF resources based on the newly requested number of queues,
which is constrained to the maximum number of queues supported per VF.

Signed-off-by: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Akeem G Abodunrin authored and Jeff Kirsher committed Mar 22, 2019
1 parent 23d21c3 commit 5743020
Showing 3 changed files with 76 additions and 16 deletions.
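
The core of the fix is how a VF's queue request is bounded in ice_vc_request_qs_msg(): the grant can never exceed the Tx/Rx queues the PF still has free, nor the per-VF maximum. The standalone C sketch below mirrors that clamping in isolation; clamp_vf_queues_example(), MAX_QS_PER_VF_EXAMPLE, and the sample numbers are illustrative assumptions, not driver code.

#include <stdio.h>

/* Per-VF queue cap; assumed to correspond to the driver's ICE_MAX_BASE_QS_PER_VF. */
#define MAX_QS_PER_VF_EXAMPLE	16

/*
 * Return the number of queue pairs a VF ends up with, given what it asked
 * for (req_queues), what it currently owns (cur_queues), and how many Tx/Rx
 * queues the PF still has unassigned. Mirrors the bounds applied by this
 * patch in ice_vc_request_qs_msg().
 */
static int clamp_vf_queues_example(int req_queues, int cur_queues,
				   int q_left_tx, int q_left_rx)
{
	int tx_rx_queue_left = q_left_tx < q_left_rx ? q_left_tx : q_left_rx;
	int max_allowed_vf_queues = tx_rx_queue_left + cur_queues;

	if (req_queues <= 0)
		return cur_queues;		/* nonsensical request: keep current count */

	if (req_queues > MAX_QS_PER_VF_EXAMPLE)
		return MAX_QS_PER_VF_EXAMPLE;	/* cap at the per-VF maximum */

	if (req_queues - cur_queues > tx_rx_queue_left)
		return max_allowed_vf_queues < MAX_QS_PER_VF_EXAMPLE ?
		       max_allowed_vf_queues : MAX_QS_PER_VF_EXAMPLE;

	return req_queues;			/* request fits: grant it as asked */
}

int main(void)
{
	/* VF owns 4 queue pairs, asks for 12, but only 6 Tx/Rx pairs remain. */
	printf("granted: %d queue pairs\n",
	       clamp_vf_queues_example(12, 4, 6, 8));	/* prints 10 */
	return 0;
}

Only a request that passes all three checks is recorded in vf->num_req_qs and triggers the VF reset that reallocates resources, as the final else branch of the ice_vc_request_qs_msg() hunk below shows.
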
33 changes: 26 additions & 7 deletions drivers/net/ethernet/intel/ice/ice_lib.c
@@ -297,22 +297,29 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
* @vf_id: Id of the VF being configured
*
* Return 0 on success and a negative value on error
*/
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
struct ice_pf *pf = vsi->back;

struct ice_vf *vf = NULL;

if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;

switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
vsi->alloc_rxq = pf->num_lan_rx;
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
case ICE_VSI_VF:
vsi->alloc_txq = pf->num_vf_qps;
vsi->alloc_rxq = pf->num_vf_qps;
vf = &pf->vf[vsi->vf_id];
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 from the original vector
@@ -472,10 +479,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
* @vf_id: Id of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
struct ice_vsi *vsi = NULL;

@@ -501,7 +510,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
vsi->idx = pf->next_vsi;
vsi->work_lmt = ICE_DFLT_IRQ_WORK;

ice_vsi_set_num_qs(vsi);
if (type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

switch (vsi->type) {
case ICE_VSI_PF:
@@ -2171,7 +2183,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;

vsi = ice_vsi_alloc(pf, type);
if (type == ICE_VSI_VF)
vsi = ice_vsi_alloc(pf, type, vf_id);
else
vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);

if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
return NULL;
@@ -2691,7 +2707,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
if (vsi->type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
ice_vsi_set_tc_cfg(vsi);

/* Initialize VSI struct elements and create VSI in FW */
58 changes: 49 additions & 9 deletions drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -495,13 +495,24 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
*/
static int ice_alloc_vf_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
int tx_rx_queue_left;
int status;

/* setup VF VSI and necessary resources */
status = ice_alloc_vsi_res(vf);
if (status)
goto ice_alloc_vf_res_exit;

/* Update number of VF queues, in case VF had requested for queue
* changes
*/
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;

if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -835,8 +846,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
usleep_range(10000, 20000);

/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_free_vf_res(&pf->vf[v]);
for (v = 0; v < pf->num_alloc_vfs; v++) {
vf = &pf->vf[v];

ice_free_vf_res(vf);

/* Free VF queues as well, and reallocate later.
* If a given VF has different number of queues
* configured, the request for update will come
* via mailbox communication.
*/
vf->num_vf_qs = 0;
}

if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
@@ -845,8 +866,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}

/* Finish the reset on each VF */
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_cleanup_and_realloc_vf(&pf->vf[v]);
for (v = 0; v < pf->num_alloc_vfs; v++) {
vf = &pf->vf[v];

vf->num_vf_qs = pf->num_vf_qps;
dev_dbg(&pf->pdev->dev,
"VF-id %d has %d queues configured\n",
vf->vf_id, vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
}

ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
@@ -1766,6 +1794,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
enum ice_status aq_ret = 0;
struct ice_vsi *vsi;
int i;
@@ -1786,6 +1815,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}

if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, qci->num_queue_pairs);
aq_ret = ICE_ERR_PARAM;
goto error_param;
}

for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
@@ -2013,6 +2050,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
int req_queues = vfres->num_queue_pairs;
enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
int max_allowed_vf_queues;
int tx_rx_queue_left;
int cur_queues;

Expand All @@ -2021,22 +2059,24 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}

cur_queues = pf->num_vf_qps;
cur_queues = vf->num_vf_qs;
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
} else if (req_queues > ICE_MAX_QS_PER_VF) {
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but only %d left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
1 change: 1 addition & 0 deletions drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,6 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
u8 num_req_qs; /* num of queue pairs requested by VF */
};
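
The new num_vf_qs field works together with the existing num_req_qs: a reset zeroes num_vf_qs, and when ice_alloc_vf_res() rebuilds the VF it adopts a pending request that still fits the queues available to it. Below is a minimal sketch of that hand-off under an assumed standalone struct; the field names match the driver, while the helper and type are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for the two ice_vf fields shown above. */
struct vf_qs_example {
	uint16_t num_vf_qs;	/* queue pairs currently configured for the VF */
	uint8_t  num_req_qs;	/* queue pairs last requested via virtchnl */
};

/*
 * Mirrors the update this patch adds to ice_alloc_vf_res(): if the VF has a
 * pending request that differs from its current allocation and still fits
 * within the queues available to it, adopt it before rebuilding the VF VSI.
 */
static void update_vf_queues_example(struct vf_qs_example *vf,
				     int tx_rx_queue_left)
{
	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
	    vf->num_req_qs != vf->num_vf_qs)
		vf->num_vf_qs = vf->num_req_qs;
}

int main(void)
{
	struct vf_qs_example vf = { .num_vf_qs = 0, .num_req_qs = 8 };

	update_vf_queues_example(&vf, 12);	/* pending request of 8 fits */
	printf("num_vf_qs after realloc: %u\n", vf.num_vf_qs);	/* prints 8 */
	return 0;
}
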

