Commit f80c170

---
r: 286313
b: refs/heads/master
c: c2f5b65
h: refs/heads/master
i:
  286311: cdab126
v: v3
Matthew Wilcox committed Jan 10, 2012
1 parent b0f59dc commit f80c170
Showing 2 changed files with 82 additions and 87 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 010e646ba2fdfc558048a97da746381c35836280
+refs/heads/master: c2f5b65020869215814df03c3941dac9436f99fb
167 changes: 81 additions & 86 deletions trunk/drivers/block/nvme.c
@@ -135,8 +135,12 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
 }
 
+typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
+						struct nvme_completion *);
+
 struct nvme_cmd_info {
-	unsigned long ctx;
+	nvme_completion_fn fn;
+	void *ctx;
 	unsigned long timeout;
 };

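Note on the hunk above: the old struct packed a handler index into the low
two bits of the 4-byte-aligned ctx word, which capped the driver at four
handlers and forced mask/unmask arithmetic on every completion; the new
layout stores the completion function itself next to an untagged ctx
pointer. A stand-alone userspace C sketch contrasting the two schemes (all
names are illustrative, not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old scheme: tag the low 2 bits of an aligned pointer with a handler ID. */
static void *pack_tagged(void *ctx, unsigned handler_id)
{
	assert(((uintptr_t)ctx & 3) == 0);	/* ctx must be 4-byte aligned */
	assert(handler_id < 4);			/* only 4 handlers fit in 2 bits */
	return (void *)((uintptr_t)ctx | handler_id);
}

/* New scheme: store the completion function next to its context. */
typedef void (*completion_fn)(void *ctx);

struct cmd_info {
	completion_fn fn;
	void *ctx;
};

static void demo_handler(void *ctx)
{
	printf("handler called with ctx %p\n", ctx);
}

int main(void)
{
	static int data;

	/* Old: the consumer must mask the tag back out before use. */
	void *tagged = pack_tagged(&data, 1);
	unsigned id = (uintptr_t)tagged & 3;
	void *ctx = (void *)((uintptr_t)tagged & ~(uintptr_t)3);
	printf("old scheme: handler id %u, ctx %p\n", id, ctx);

	/* New: one indirect call, no masking, no four-handler limit. */
	struct cmd_info info = { .fn = demo_handler, .ctx = &data };
	info.fn(info.ctx);
	return 0;
}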
@@ -149,7 +153,7 @@ static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
  * alloc_cmdid() - Allocate a Command ID
  * @nvmeq: The queue that will be used for this command
  * @ctx: A pointer that will be passed to the handler
- * @handler: The ID of the handler to call
+ * @handler: The function to call on completion
  *
  * Allocate a Command ID for a queue. The data passed in will
  * be passed to the completion handler. This is implemented by using
@@ -160,76 +164,97 @@ static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
  * May be called with local interrupts disabled and the q_lock held,
  * or with interrupts enabled and no locks held.
  */
-static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
-							unsigned timeout)
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+				nvme_completion_fn handler, unsigned timeout)
 {
 	int depth = nvmeq->q_depth - 1;
 	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
 	int cmdid;
 
-	BUG_ON((unsigned long)ctx & 3);
-
 	do {
 		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
 		if (cmdid >= depth)
 			return -EBUSY;
 	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
 
-	info[cmdid].ctx = (unsigned long)ctx | handler;
+	info[cmdid].fn = handler;
+	info[cmdid].ctx = ctx;
 	info[cmdid].timeout = jiffies + timeout;
 	return cmdid;
 }
 
 static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
-						int handler, unsigned timeout)
+				nvme_completion_fn handler, unsigned timeout)
 {
 	int cmdid;
 	wait_event_killable(nvmeq->sq_full,
 		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
 	return (cmdid < 0) ? -EINTR : cmdid;
 }
 
-/*
- * If you need more than four handlers, you'll need to change how
- * alloc_cmdid and nvme_process_cq work. Consider using a special
- * CMD_CTX value instead, if that works for your situation.
- */
-enum {
-	sync_completion_id = 0,
-	bio_completion_id,
-};
-
-/* Special values must be a multiple of 4, and less than 0x1000 */
-#define CMD_CTX_BASE		(POISON_POINTER_DELTA + sync_completion_id)
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
 #define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
 #define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
 #define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
 #define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
 
+static void special_completion(struct nvme_queue *nvmeq, void *ctx,
+						struct nvme_completion *cqe)
+{
+	if (ctx == CMD_CTX_CANCELLED)
+		return;
+	if (ctx == CMD_CTX_FLUSH)
+		return;
+	if (ctx == CMD_CTX_COMPLETED) {
+		dev_warn(nvmeq->q_dmadev,
+				"completed id %d twice on queue %d\n",
+				cqe->command_id, le16_to_cpup(&cqe->sq_id));
+		return;
+	}
+	if (ctx == CMD_CTX_INVALID) {
+		dev_warn(nvmeq->q_dmadev,
+				"invalid id %d completed on queue %d\n",
+				cqe->command_id, le16_to_cpup(&cqe->sq_id));
+		return;
+	}
+
+	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
+}
+
 /*
  * Called with local interrupts disabled and the q_lock held. May not sleep.
  */
-static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+						nvme_completion_fn *fn)
 {
-	unsigned long data;
+	void *ctx;
 	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
 
-	if (cmdid >= nvmeq->q_depth)
+	if (cmdid >= nvmeq->q_depth) {
+		*fn = special_completion;
 		return CMD_CTX_INVALID;
-	data = info[cmdid].ctx;
+	}
+	*fn = info[cmdid].fn;
+	ctx = info[cmdid].ctx;
+	info[cmdid].fn = special_completion;
 	info[cmdid].ctx = CMD_CTX_COMPLETED;
 	clear_bit(cmdid, nvmeq->cmdid_data);
 	wake_up(&nvmeq->sq_full);
-	return data;
+	return ctx;
 }
 
-static unsigned long cancel_cmdid(struct nvme_queue *nvmeq, int cmdid)
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+						nvme_completion_fn *fn)
 {
-	unsigned long data;
+	void *ctx;
 	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
-	data = info[cmdid].ctx;
+	if (fn)
+		*fn = info[cmdid].fn;
+	ctx = info[cmdid].ctx;
+	info[cmdid].fn = special_completion;
 	info[cmdid].ctx = CMD_CTX_CANCELLED;
-	return data;
+	return ctx;
 }
 
 static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
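
The CMD_CTX_* sentinels in the hunk above sit at small offsets from
POISON_POINTER_DELTA, a base address the kernel guarantees no real
allocation can occupy, so a sentinel can live in the same void *ctx slot as
a live pointer and still be recognized by special_completion(). A minimal
userspace sketch of the same idea; the base constant below is only a
stand-in (the kernel's real delta depends on CONFIG_ILLEGAL_POINTER_VALUE):

#include <stdio.h>

/* Stand-in for POISON_POINTER_DELTA: an address range the allocator
 * will never hand out, so sentinels cannot collide with real pointers. */
#define CTX_BASE	((char *)0xdead000000000000UL)
#define CTX_CANCELLED	((void *)(CTX_BASE + 0x30C))
#define CTX_COMPLETED	((void *)(CTX_BASE + 0x310))

static void special_completion(void *ctx)
{
	if (ctx == CTX_CANCELLED) {
		/* cancelled commands complete silently */
		return;
	}
	if (ctx == CTX_COMPLETED) {
		puts("warning: command completed twice");
		return;
	}
	printf("unknown special completion %p\n", ctx);
}

int main(void)
{
	special_completion(CTX_CANCELLED);
	special_completion(CTX_COMPLETED);
	return 0;
}
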
@@ -485,7 +510,7 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
 {
 	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
-					sync_completion_id, IO_TIMEOUT);
+					special_completion, IO_TIMEOUT);
 	if (unlikely(cmdid < 0))
 		return cmdid;
 
@@ -518,7 +543,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	nbio->bio = bio;
 
 	result = -EBUSY;
-	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
+	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion, IO_TIMEOUT);
 	if (unlikely(cmdid < 0))
 		goto free_nbio;
 
@@ -599,45 +624,6 @@ static int nvme_make_request(struct request_queue *q, struct bio *bio)
 	return 0;
 }
 
-struct sync_cmd_info {
-	struct task_struct *task;
-	u32 result;
-	int status;
-};
-
-static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
-						struct nvme_completion *cqe)
-{
-	struct sync_cmd_info *cmdinfo = ctx;
-	if (unlikely((unsigned long)cmdinfo == CMD_CTX_CANCELLED))
-		return;
-	if ((unsigned long)cmdinfo == CMD_CTX_FLUSH)
-		return;
-	if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
-		dev_warn(nvmeq->q_dmadev,
-				"completed id %d twice on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
-		dev_warn(nvmeq->q_dmadev,
-				"invalid id %d completed on queue %d\n",
-				cqe->command_id, le16_to_cpup(&cqe->sq_id));
-		return;
-	}
-	cmdinfo->result = le32_to_cpup(&cqe->result);
-	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
-	wake_up_process(cmdinfo->task);
-}
-
-typedef void (*completion_fn)(struct nvme_queue *, void *,
-						struct nvme_completion *);
-
-static const completion_fn nvme_completions[4] = {
-	[sync_completion_id] = sync_completion,
-	[bio_completion_id] = bio_completion,
-};
-
 static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	u16 head, phase;
@@ -646,9 +632,8 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 	phase = nvmeq->cq_phase;
 
 	for (;;) {
-		unsigned long data;
-		void *ptr;
-		unsigned char handler;
+		void *ctx;
+		nvme_completion_fn fn;
 		struct nvme_completion cqe = nvmeq->cqes[head];
 		if ((le16_to_cpu(cqe.status) & 1) != phase)
 			break;
@@ -658,10 +643,8 @@ static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
 			phase = !phase;
 		}
 
-		data = free_cmdid(nvmeq, cqe.command_id);
-		handler = data & 3;
-		ptr = (void *)(data & ~3UL);
-		nvme_completions[handler](nvmeq, ptr, &cqe);
+		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+		fn(nvmeq, ctx, &cqe);
 	}
 
 	/* If the controller ignores the cq head doorbell and continuously
@@ -702,10 +685,25 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
 {
 	spin_lock_irq(&nvmeq->q_lock);
-	cancel_cmdid(nvmeq, cmdid);
+	cancel_cmdid(nvmeq, cmdid, NULL);
 	spin_unlock_irq(&nvmeq->q_lock);
 }
 
+struct sync_cmd_info {
+	struct task_struct *task;
+	u32 result;
+	int status;
+};
+
+static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
+						struct nvme_completion *cqe)
+{
+	struct sync_cmd_info *cmdinfo = ctx;
+	cmdinfo->result = le32_to_cpup(&cqe->result);
+	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+	wake_up_process(cmdinfo->task);
+}
+
 /*
  * Returns 0 on success. If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
@@ -719,7 +717,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
 	cmdinfo.task = current;
 	cmdinfo.status = -EINTR;
 
-	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
+	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
 								timeout);
 	if (cmdid < 0)
 		return cmdid;
@@ -1201,18 +1199,15 @@ static void nvme_timeout_ios(struct nvme_queue *nvmeq)
 	int cmdid;
 
 	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
-		unsigned long data;
-		void *ptr;
-		unsigned char handler;
+		void *ctx;
+		nvme_completion_fn fn;
 		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
 
 		if (!time_after(now, info[cmdid].timeout))
 			continue;
 		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
-		data = cancel_cmdid(nvmeq, cmdid);
-		handler = data & 3;
-		ptr = (void *)(data & ~3UL);
-		nvme_completions[handler](nvmeq, ptr, &cqe);
+		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+		fn(nvmeq, ctx, &cqe);
 	}
 }
 
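Both nvme_process_cq() earlier and nvme_timeout_ios() in the hunk above now
dispatch the same way: fetch (fn, ctx) for the command ID, then make one
indirect call, with no nvme_completions[4] table and no bit masking. A
reduced userspace sketch of that flow (slot count and names simplified from
the driver):

#include <stdio.h>

struct completion { int command_id; int status; };
typedef void (*completion_fn)(void *ctx, struct completion *cqe);

struct cmd_info {
	completion_fn fn;
	void *ctx;
};

static struct cmd_info info[4];		/* per-queue command slots */

/* Like alloc_cmdid(): record the handler and its context in the slot. */
static void record(int cmdid, completion_fn fn, void *ctx)
{
	info[cmdid].fn = fn;
	info[cmdid].ctx = ctx;
}

/* Like free_cmdid(): return ctx, report the handler via *fn. */
static void *take(int cmdid, completion_fn *fn)
{
	*fn = info[cmdid].fn;
	return info[cmdid].ctx;
}

static void bio_done(void *ctx, struct completion *cqe)
{
	printf("bio %p done, status %d\n", ctx, cqe->status);
}

int main(void)
{
	static int fake_bio;
	struct completion cqe = { .command_id = 0, .status = 0 };
	completion_fn fn;
	void *ctx;

	record(cqe.command_id, bio_done, &fake_bio);
	ctx = take(cqe.command_id, &fn);
	fn(ctx, &cqe);			/* the whole dispatch step */
	return 0;
}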
