nvme: simplify completion handling
Now that all commands are executed as block layer requests we can remove the
internal completion in the NVMe driver.  Note that we can simply call
blk_mq_complete_request to abort commands as the block layer will protect
against double completions internally.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
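To make the new flow concrete, here is a minimal sketch of the resulting completion path (an editorial illustration, not part of the patch: the helper name example_complete_cqe() is invented, while blk_mq_tag_to_rq() and blk_mq_complete_request() are the block layer calls the patch actually uses). Because every command is now backed by a struct request, the CQE's command_id is simply its blk-mq tag, so completion reduces to a tag lookup followed by blk_mq_complete_request(), and cancellation funnels into the same call.

/*
 * Editorial sketch, not part of the patch.  The helper name
 * example_complete_cqe() is invented for illustration; blk_mq_tag_to_rq()
 * and blk_mq_complete_request() are the block layer interfaces the patch
 * relies on.
 */
static void example_complete_cqe(struct nvme_queue *nvmeq,
		struct nvme_completion *cqe)
{
	/* Look up the request by tag instead of a driver-private ctx/fn pair. */
	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
	int status = le16_to_cpu(cqe->status) >> 1;

	/*
	 * Normal completion and cancellation can both end here; blk-mq
	 * completes a request at most once, so the driver no longer needs
	 * the CMD_CTX_* poison values to catch double completions.
	 */
	blk_mq_complete_request(req, status);
}

The actual patch keeps the error translation and requeue logic in req_completion(); the sketch only shows the lookup-and-complete shape.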
Christoph Hellwig authored and Jens Axboe committed Dec 22, 2015
1 parent adf68f2 commit aae239e
Showing 1 changed file with 26 additions and 115 deletions.
141 changes: 26 additions & 115 deletions drivers/nvme/host/pci.c
@@ -199,15 +199,11 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
-typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
-						struct nvme_completion *);
-
 struct nvme_cmd_info {
-	nvme_completion_fn fn;
-	void *ctx;
 	int aborted;
 	struct nvme_queue *nvmeq;
-	struct nvme_iod iod[0];
+	struct nvme_iod *iod;
+	struct nvme_iod __iod;
 };
 
 /*
@@ -302,15 +298,6 @@ static int nvme_init_request(void *data, struct request *req,
 	return 0;
 }
 
-static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
-				nvme_completion_fn handler)
-{
-	cmd->fn = handler;
-	cmd->ctx = ctx;
-	cmd->aborted = 0;
-	blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
-}
-
 static void *iod_get_private(struct nvme_iod *iod)
 {
 	return (void *) (iod->private & ~0x1UL);
@@ -324,44 +311,6 @@ static bool iod_should_kfree(struct nvme_iod *iod)
 	return (iod->private & NVME_INT_MASK) == 0;
 }
 
-/* Special values must be less than 0x1000 */
-#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
-#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
-#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
-#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
-
-static void special_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	if (ctx == CMD_CTX_CANCELLED)
-		return;
-	if (ctx == CMD_CTX_COMPLETED) {
-		dev_warn(nvmeq->q_dmadev,
-				"completed id %d twice on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	if (ctx == CMD_CTX_INVALID) {
-		dev_warn(nvmeq->q_dmadev,
-				"invalid id %d completed on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
-}
-
-static void *cancel_cmd_info(struct nvme_cmd_info *cmd, nvme_completion_fn *fn)
-{
-	void *ctx;
-
-	if (fn)
-		*fn = cmd->fn;
-	ctx = cmd->ctx;
-	cmd->fn = special_completion;
-	cmd->ctx = CMD_CTX_CANCELLED;
-	return ctx;
-}
-
 static void nvme_complete_async_event(struct nvme_dev *dev,
 		struct nvme_completion *cqe)
 {
@@ -382,34 +331,6 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
 	}
 }
 
-static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
-				unsigned int tag)
-{
-	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, tag);
-
-	return blk_mq_rq_to_pdu(req);
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
-						nvme_completion_fn *fn)
-{
-	struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag);
-	void *ctx;
-	if (tag >= nvmeq->q_depth) {
-		*fn = special_completion;
-		return CMD_CTX_INVALID;
-	}
-	if (fn)
-		*fn = cmd->fn;
-	ctx = cmd->ctx;
-	cmd->fn = special_completion;
-	cmd->ctx = CMD_CTX_COMPLETED;
-	return ctx;
-}
-
 /**
  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
@@ -473,7 +394,7 @@ static struct nvme_iod *nvme_alloc_iod(struct request *rq, struct nvme_dev *dev,
 	    size <= NVME_INT_BYTES(dev)) {
 		struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
 
-		iod = cmd->iod;
+		iod = &cmd->__iod;
 		iod_init(iod, size, rq->nr_phys_segments,
 				(unsigned long) rq | NVME_INT_MASK);
 		return iod;
@@ -570,12 +491,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static void req_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
+static void req_completion(struct nvme_queue *nvmeq, struct nvme_completion *cqe)
 {
-	struct nvme_iod *iod = ctx;
-	struct request *req = iod_get_private(iod);
+	struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
+	struct nvme_iod *iod = cmd_rq->iod;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 	int error = 0;
 
@@ -586,14 +506,10 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			return;
 		}
 
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-			if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-				error = NVME_SC_CANCELLED;
-			else
-				error = status;
-		} else {
+		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+			error = status;
+		else
 			error = nvme_error_status(status);
-		}
 	}
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
@@ -836,8 +752,10 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ret)
 		goto out;
 
+	cmd->iod = iod;
+	cmd->aborted = 0;
 	cmnd.common.command_id = req->tag;
-	nvme_set_info(cmd, iod, req_completion);
+	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
 	__nvme_submit_cmd(nvmeq, &cmnd);
@@ -857,8 +775,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 	phase = nvmeq->cq_phase;
 
 	for (;;) {
-		void *ctx;
-		nvme_completion_fn fn;
 		struct nvme_completion cqe = nvmeq->cqes[head];
 		u16 status = le16_to_cpu(cqe.status);
 
@@ -873,6 +789,13 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		if (tag && *tag == cqe.command_id)
 			*tag = -1;
 
+		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
+			dev_warn(nvmeq->q_dmadev,
+				"invalid id %d completed on queue %d\n",
+				cqe.command_id, le16_to_cpu(cqe.sq_id));
+			continue;
+		}
+
 		/*
 		 * AEN requests are special as they don't time out and can
 		 * survive any kind of queue freeze and often don't respond to
@@ -885,8 +808,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 			continue;
 		}
 
-		ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
-		fn(nvmeq, ctx, &cqe);
+		req_completion(nvmeq, &cqe);
 	}
 
 	/* If the controller ignores the cq head doorbell and continuously
@@ -1125,29 +1047,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
 {
 	struct nvme_queue *nvmeq = data;
-	void *ctx;
-	nvme_completion_fn fn;
-	struct nvme_cmd_info *cmd;
-	struct nvme_completion cqe;
+	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	cmd = blk_mq_rq_to_pdu(req);
-
-	if (cmd->ctx == CMD_CTX_CANCELLED)
-		return;
+	dev_warn(nvmeq->q_dmadev,
+		"Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
 
+	status = NVME_SC_CANCELLED;
 	if (blk_queue_dying(req->q))
-		cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
-	else
-		cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
-
-
-	dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
-						req->tag, nvmeq->qid);
-	ctx = cancel_cmd_info(cmd, &fn);
-	fn(nvmeq, ctx, &cqe);
+		status |= NVME_SC_DNR;
+	blk_mq_complete_request(req, status);
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
