net: hns3: optimize the CSQ cmd error handling
If the CMDQ ring is full, hclge_cmd_send() may return directly, but the IMP
is still working and the HW head pointer keeps moving, so the SW ring pointer
no longer matches the HW pointer. This patch updates the SW pointer every time
the ring is found full, so the next send can work normally as long as the IMP
and HW are still running; the ring-space sketch below illustrates the effect.

Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Peng Li authored and David S. Miller committed Jun 28, 2019
1 parent 289f812 commit 82c8ae6
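
To make the pointer resync concrete, here is a minimal, self-contained C sketch of the circular-ring space arithmetic such a command send queue relies on. The ring_space() helper, the 1024-entry ring size, and the sample pointer values are illustrative assumptions rather than the driver's exact code; the point is that a stale software head (next_to_clean) can make the ring look full even after the firmware has consumed descriptors, and re-reading the hardware HEAD register restores the usable space.

#include <stdio.h>

#define CSQ_DESC_NUM 1024	/* illustrative ring size, not the driver's value */

struct csq_ring {
	int next_to_use;	/* SW tail: slot where the next descriptor is written */
	int next_to_clean;	/* SW head: first descriptor not yet seen as consumed */
	int desc_num;		/* total descriptors in the ring */
};

/* Free slots in a circular ring; one slot stays empty so that
 * head == tail unambiguously means "ring empty".
 */
static int ring_space(const struct csq_ring *ring)
{
	int used = (ring->next_to_use - ring->next_to_clean + ring->desc_num) %
		   ring->desc_num;

	return ring->desc_num - used - 1;
}

int main(void)
{
	struct csq_ring csq = {
		.next_to_use = 1023,
		.next_to_clean = 0,	/* stale SW head */
		.desc_num = CSQ_DESC_NUM,
	};
	int hw_head = 900;		/* value the HW HEAD register would report */

	/* With a stale SW head the ring looks completely full. */
	printf("space with stale SW head: %d\n", ring_space(&csq));

	/* What the patch does when the ring looks full: resync the SW head
	 * from the hardware HEAD register before giving up, so descriptors
	 * the firmware has already consumed count as free again.
	 */
	csq.next_to_clean = hw_head;
	printf("space after resync from HW head: %d\n", ring_space(&csq));

	return 0;
}

With the stale head, the first printf reports 0 free slots even though the firmware has long since consumed most of the descriptors; after the resync it reports 900 again, which is why the patch refreshes csq->next_to_clean before returning -EBUSY.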
Showing 2 changed files with 26 additions and 8 deletions.
15 changes: 12 additions & 3 deletions drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -232,6 +232,7 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 {
 	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+	struct hclge_cmq_ring *csq = &hw->cmq.csq;
 	struct hclge_desc *desc_to_use;
 	bool complete = false;
 	u32 timeout = 0;
@@ -241,8 +242,16 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 
 	spin_lock_bh(&hw->cmq.csq.lock);
 
-	if (num > hclge_ring_space(&hw->cmq.csq) ||
-	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
+	if (num > hclge_ring_space(&hw->cmq.csq)) {
+		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
+		 * need update the SW HEAD pointer csq->next_to_clean
+		 */
+		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
 		spin_unlock_bh(&hw->cmq.csq.lock);
 		return -EBUSY;
 	}
@@ -280,7 +289,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	}
 
 	if (!complete) {
-		retval = -EAGAIN;
+		retval = -EBADE;
 	} else {
 		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
 	}
19 changes: 14 additions & 5 deletions drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -188,6 +188,7 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 {
 	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
 	struct hclgevf_desc *desc_to_use;
 	bool complete = false;
 	u32 timeout = 0;
@@ -199,8 +200,17 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 
 	spin_lock_bh(&hw->cmq.csq.lock);
 
-	if (num > hclgevf_ring_space(&hw->cmq.csq) ||
-	    test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
+	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
+		 * need update the SW HEAD pointer csq->next_to_clean
+		 */
+		csq->next_to_clean = hclgevf_read_dev(hw,
+						      HCLGEVF_NIC_CSQ_HEAD_REG);
 		spin_unlock_bh(&hw->cmq.csq.lock);
 		return -EBUSY;
 	}
@@ -263,14 +273,13 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 	}
 
 	if (!complete)
-		status = -EAGAIN;
+		status = -EBADE;
 
 	/* Clean the command send queue */
 	handle = hclgevf_cmd_csq_clean(hw);
-	if (handle != num) {
+	if (handle != num)
 		dev_warn(&hdev->pdev->dev,
 			 "cleaned %d, need to clean %d\n", handle, num);
-	}
 
 	spin_unlock_bh(&hw->cmq.csq.lock);
 
