nvme: remove handling of multiple AEN requests
The driver can handle tracking only one AEN request, so this patch
removes handling for multiple ones.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: James Smart  <james.smart@broadcom.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Keith Busch authored and Jens Axboe committed Nov 11, 2017
1 parent 08e1507 commit ad22c35
Showing 6 changed files with 11 additions and 40 deletions.
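
At a glance, the change drops the aer_idx argument from the per-transport submit_async_event callback, since only one AEN command is ever outstanding. A minimal before/after sketch of the ops hook, taken from the nvme.h hunk below:

	/* before: the core passed an index into a set of AEN slots */
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);

	/* after: a single outstanding AEN, no index needed */
	void (*submit_async_event)(struct nvme_ctrl *ctrl);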
28 changes: 3 additions & 25 deletions drivers/nvme/host/core.c
@@ -2670,15 +2670,7 @@ static void nvme_async_event_work(struct work_struct *work)
 	struct nvme_ctrl *ctrl =
 		container_of(work, struct nvme_ctrl, async_event_work);
 
-	spin_lock_irq(&ctrl->lock);
-	while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
-		int aer_idx = --ctrl->event_limit;
-
-		spin_unlock_irq(&ctrl->lock);
-		ctrl->ops->submit_async_event(ctrl, aer_idx);
-		spin_lock_irq(&ctrl->lock);
-	}
-	spin_unlock_irq(&ctrl->lock);
+	ctrl->ops->submit_async_event(ctrl);
 }
 
 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -2745,22 +2737,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		union nvme_result *res)
 {
 	u32 result = le32_to_cpu(res->u32);
-	bool done = true;
-
-	switch (le16_to_cpu(status) >> 1) {
-	case NVME_SC_SUCCESS:
-		done = false;
-		/*FALLTHRU*/
-	case NVME_SC_ABORT_REQ:
-		++ctrl->event_limit;
-		if (ctrl->state == NVME_CTRL_LIVE)
-			queue_work(nvme_wq, &ctrl->async_event_work);
-		break;
-	default:
-		break;
-	}
 
-	if (done)
+	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
 	switch (result & 0xff07) {
@@ -2774,12 +2752,12 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	default:
 		dev_warn(ctrl->device, "async event result %08x\n", result);
 	}
+	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
-	ctrl->event_limit = NVME_NR_AEN_COMMANDS;
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
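
Put together, the core now re-arms the single AEN from the completion path rather than counting slots with event_limit. A condensed sketch of the resulting flow in core.c, reconstructed from the hunks above (details elided):

	static void nvme_async_event_work(struct work_struct *work)
	{
		struct nvme_ctrl *ctrl =
			container_of(work, struct nvme_ctrl, async_event_work);

		/* only one AEN is ever outstanding, so just resubmit it */
		ctrl->ops->submit_async_event(ctrl);
	}

	void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
			union nvme_result *res)
	{
		u32 result = le32_to_cpu(res->u32);

		if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
			return;

		/* ... log/handle the event based on (result & 0xff07) ... */

		/* re-arm: queue the work item that resubmits the AEN */
		queue_work(nvme_wq, &ctrl->async_event_work);
	}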
9 changes: 3 additions & 6 deletions drivers/nvme/host/fc.c
@@ -2382,17 +2382,14 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 }
 
 static void
-nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
 	struct nvme_fc_fcp_op *aen_op;
 	unsigned long flags;
 	bool terminating = false;
 	blk_status_t ret;
 
-	if (aer_idx > NVME_NR_AEN_COMMANDS)
-		return;
-
 	spin_lock_irqsave(&ctrl->lock, flags);
 	if (ctrl->flags & FCCTRL_TERMIO)
 		terminating = true;
@@ -2401,13 +2398,13 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	if (terminating)
 		return;
 
-	aen_op = &ctrl->aen_ops[aer_idx];
+	aen_op = &ctrl->aen_ops[0];
 
 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
 					NVMEFC_FCP_NODATA);
 	if (ret)
 		dev_err(ctrl->ctrl.device,
-			"failed async event work [%d]\n", aer_idx);
+			"failed async event work\n");
 }
 
 static void
3 changes: 1 addition & 2 deletions drivers/nvme/host/nvme.h
@@ -162,7 +162,6 @@ struct nvme_ctrl {
 	u16 nssa;
 	u16 nr_streams;
 	atomic_t abort_limit;
-	u8 event_limit;
 	u8 vwc;
 	u32 vs;
 	u32 sgls;
@@ -237,7 +236,7 @@ struct nvme_ctrl_ops {
 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
-	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 	int (*reinit_request)(void *data, struct request *rq);
4 changes: 2 additions & 2 deletions drivers/nvme/host/pci.c
@@ -1043,15 +1043,15 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	return __nvme_poll(nvmeq, tag);
 }
 
-static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
 	struct nvme_queue *nvmeq = dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
-	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH + aer_idx;
+	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 
 	spin_lock_irq(&nvmeq->q_lock);
 	__nvme_submit_cmd(nvmeq, &c);
5 changes: 1 addition & 4 deletions drivers/nvme/host/rdma.c
@@ -1293,7 +1293,7 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
 	return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
-static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
 	struct nvme_rdma_queue *queue = &ctrl->queues[0];
@@ -1303,9 +1303,6 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	struct ib_sge sge;
 	int ret;
 
-	if (WARN_ON_ONCE(aer_idx != 0))
-		return;
-
 	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
 
 	memset(cmd, 0, sizeof(*cmd));
2 changes: 1 addition & 1 deletion drivers/nvme/target/loop.c
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
 	struct nvme_loop_queue *queue = &ctrl->queues[0];
