Skip to content

Commit

Permalink
Merge branch 'qed-next'
Browse files Browse the repository at this point in the history
Manish Chopra says:

====================
qed*: driver updates

There are several new additions in this series;
Most are connected to either Tx offloading or Rx classifications
[either fastpath changes or supporting configuration].

In addition, there's a single IOV enhancement.

Please consider applying this series to `net-next'.

V2->V3:
Fixes the kbuild warning below:
call to '__compiletime_assert_60' declared with
attribute error: Need native word sized stores/loads for atomicity.

V1->V2:
Added a fix for the race in ramrod handling
pointed out by Eric Dumazet [patch 7].
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
David S. Miller committed Oct 14, 2016
2 parents d0b3fbb + d5df768 commit 9c7664c
Show file tree
Hide file tree
Showing 10 changed files with 244 additions and 64 deletions.
12 changes: 10 additions & 2 deletions drivers/net/ethernet/qlogic/qed/qed_l2.c
Original file line number Diff line number Diff line change
Expand Up @@ -1652,6 +1652,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,

if (IS_PF(cdev)) {
int max_vf_vlan_filters = 0;
int max_vf_mac_filters = 0;

if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i)
Expand All @@ -1665,11 +1666,18 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_queues = cdev->num_hwfns;
}

if (IS_QED_SRIOV(cdev))
if (IS_QED_SRIOV(cdev)) {
max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
QED_ETH_VF_NUM_VLAN_FILTERS;
info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
max_vf_mac_filters = cdev->p_iov_info->total_vfs *
QED_ETH_VF_NUM_MAC_FILTERS;
}
info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
QED_VLAN) -
max_vf_vlan_filters;
info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
QED_MAC) -
max_vf_mac_filters;

ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
Expand Down
4 changes: 2 additions & 2 deletions drivers/net/ethernet/qlogic/qed/qed_sp.h
Original file line number Diff line number Diff line change
Expand Up @@ -111,8 +111,8 @@ union qed_spq_req_comp {
};

struct qed_spq_comp_done {
u64 done;
u8 fw_return_code;
unsigned int done;
u8 fw_return_code;
};

struct qed_spq_entry {
Expand Down
97 changes: 65 additions & 32 deletions drivers/net/ethernet/qlogic/qed/qed_spq.c
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,11 @@
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
#define SPQ_BLOCK_SLEEP_LENGTH (1000)

#define SPQ_BLOCK_DELAY_MAX_ITER (10)
#define SPQ_BLOCK_DELAY_US (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
#define SPQ_BLOCK_SLEEP_MS (5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
Expand All @@ -50,60 +54,88 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,

comp_done = (struct qed_spq_comp_done *)cookie;

comp_done->done = 0x1;
comp_done->fw_return_code = fw_return_code;
comp_done->fw_return_code = fw_return_code;

/* make update visible to waiting thread */
smp_wmb();
/* Make sure completion done is visible on waiting thread */
smp_store_release(&comp_done->done, 0x1);
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *p_fw_ret)
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *p_fw_ret, bool sleep_between_iter)
{
int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
struct qed_spq_comp_done *comp_done;
int rc;
u32 iter_cnt;

comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
while (sleep_count) {
/* validate we receive completion update */
smp_rmb();
if (comp_done->done == 1) {
iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
: SPQ_BLOCK_DELAY_MAX_ITER;

while (iter_cnt--) {
/* Validate we receive completion update */
if (READ_ONCE(comp_done->done) == 1) {
/* Read updated FW return value */
smp_read_barrier_depends();
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
usleep_range(5000, 10000);
sleep_count--;

if (sleep_between_iter)
msleep(SPQ_BLOCK_SLEEP_MS);
else
udelay(SPQ_BLOCK_DELAY_US);
}

return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct qed_spq_comp_done *comp_done;
int rc;

/* A relatively short polling period w/o sleeping, to allow the FW to
* complete the ramrod and thus possibly to avoid the following sleeps.
*/
if (!skip_quick_poll) {
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
if (!rc)
return 0;
}

/* Move to polling with a sleeping period between iterations */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
return 0;

DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
if (rc != 0)
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
}

/* Retry after drain */
sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
while (sleep_count) {
/* validate we receive completion update */
smp_rmb();
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
usleep_range(5000, 10000);
sleep_count--;
}
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
return 0;

comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}

DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
err:
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
p_ent->elem.hdr.cmd_id,
p_ent->elem.hdr.protocol_id,
le16_to_cpu(p_ent->elem.hdr.echo));

return -EBUSY;
}
Expand Down Expand Up @@ -729,7 +761,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* access p_ent here to see whether it's successful or not.
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
p_ent->queue == &p_spq->unlimited_pending);

if (p_ent->queue == &p_spq->unlimited_pending) {
/* This is an allocated p_ent which does not need to
Expand Down
Loading

0 comments on commit 9c7664c

Please sign in to comment.