scsi: qedf: Add schedule recovery handler
Implement a recovery handler to be used by QED to signal the need for
recovery, so the driver can come out of error conditions such as a stuck
ramrod or a firmware context reset.

Link: https://lore.kernel.org/r/20200416084314.18851-8-skashyap@marvell.com
Signed-off-by: Chad Dupuis <cdupuis@marvell.com>
Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Chad Dupuis authored and Martin K. Petersen committed Apr 17, 2020
1 parent 6e7c8ee commit f6b172f
Showing 2 changed files with 44 additions and 0 deletions.
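
For orientation before the diff, here is a minimal user-space C sketch of the callback flow this patch wires up: the core module holds a callback table registered by the FCoE driver and invokes schedule_recovery_handler when it hits a fatal error, and the driver responds by tearing itself down and re-probing in recovery mode. Only the handler names and the QEDF_MODE_RECOVERY idea mirror the patch; the cb_ops struct, the synchronous invocation, and all other scaffolding here are simplified assumptions for illustration, not the kernel API.

/*
 * Illustrative user-space sketch (not kernel code) of the recovery
 * callback flow added by this commit. Names that appear in the diff
 * (qedf_schedule_recovery_handler, qedf_recovery_handler) are reused;
 * everything else is hypothetical scaffolding.
 */
#include <stdio.h>

struct qedf_ctx {
	int in_recovery;	/* stands in for the QEDF_IN_RECOVERY flag bit */
};

/* Callback table, analogous to the schedule_recovery_handler member
 * wired up in qedf_cb_ops by this patch. */
struct cb_ops {
	void (*schedule_recovery_handler)(void *cookie);
};

static void qedf_recovery_handler(struct qedf_ctx *qedf)
{
	if (qedf->in_recovery)
		return;		/* recovery already in progress */
	qedf->in_recovery = 1;

	/* The real handler quiesces the device first, removes the adapter
	 * in recovery mode, then probes it again in the same mode. */
	printf("remove adapter in recovery mode\n");
	printf("probe adapter in recovery mode\n");

	qedf->in_recovery = 0;
}

static void qedf_schedule_recovery_handler(void *cookie)
{
	/* In the driver this queues delayed work; here it runs synchronously. */
	qedf_recovery_handler(cookie);
}

int main(void)
{
	struct qedf_ctx qedf = { .in_recovery = 0 };
	struct cb_ops ops = {
		.schedule_recovery_handler = qedf_schedule_recovery_handler,
	};

	/* The core module would invoke the callback on a fatal error,
	 * e.g. a stuck ramrod or a firmware context reset. */
	ops.schedule_recovery_handler(&qedf);
	return 0;
}
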
1 change: 1 addition & 0 deletions drivers/scsi/qedf/qedf.h
@@ -387,6 +387,7 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
struct delayed_work recovery_work;
struct delayed_work grcdump_work;
struct delayed_work stag_work;

43 changes: 43 additions & 0 deletions drivers/scsi/qedf/qedf_main.c
@@ -28,6 +28,8 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
static void qedf_schedule_recovery_handler(void *dev);
static void qedf_recovery_handler(struct work_struct *work);

/*
* Driver module parameters.
@@ -662,6 +664,7 @@ static struct qed_fcoe_cb_ops qedf_cb_ops = {
{
.link_update = qedf_link_update,
.bw_update = qedf_bw_update,
.schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
@@ -3502,6 +3505,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->lport->host->host_no);
qedf->dpc_wq = create_workqueue(host_buf);
}
INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);

/*
* GRC dump and sysfs parameters are not reaped during the recovery
@@ -3817,6 +3821,45 @@ static void qedf_shutdown(struct pci_dev *pdev)
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

/*
* Recovery handler code
*/
static void qedf_schedule_recovery_handler(void *dev)
{
struct qedf_ctx *qedf = dev;

QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
schedule_delayed_work(&qedf->recovery_work, 0);
}

static void qedf_recovery_handler(struct work_struct *work)
{
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, recovery_work.work);

if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
return;

/*
* Call common_ops->recovery_prolog to allow the MFW to quiesce
* any PCI transactions.
*/
qed_ops->common->recovery_prolog(qedf->cdev);

QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
/*
* Reset link and dcbx to down state since we will not get a link down
* event from the MFW but calling __qedf_remove will essentially be a
* link down event.
*/
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
}

/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
