From b060022eaa28973fd76e9b906bb200c55ce15739 Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Wed, 16 Mar 2011 16:29:00 -0400
Subject: [PATCH]

--- yaml ---
r: 286279
b: refs/heads/master
c: fa92282149842645931580225647238428374758
h: refs/heads/master
i: 286277: 2b8510e8a3dd27ee1553f6a09399914ba83c0087
  286275: 2a0bc4e194558fb2b4c00bdc2f57da043e166dea
  286271: aacdc5d77f3f824a2cbb113ebcf42d17ca1bcfd2
v: v3
---
 [refs]                     | 2 +-
 trunk/drivers/block/nvme.c | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index f7240f78a18a..e1562edb0feb 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 714a7a22884b74862540bc84955274d86b2f6040
+refs/heads/master: fa92282149842645931580225647238428374758
diff --git a/trunk/drivers/block/nvme.c b/trunk/drivers/block/nvme.c
index e392919e0eac..740a9c1b81aa 100644
--- a/trunk/drivers/block/nvme.c
+++ b/trunk/drivers/block/nvme.c
@@ -182,7 +182,8 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 	return (cmdid < 0) ? -EINTR : cmdid;
 }
 
-/* If you need more than four handlers, you'll need to change how
+/*
+ * If you need more than four handlers, you'll need to change how
  * alloc_cmdid and nvme_process_cq work. Consider using a special
  * CMD_CTX value instead, if that works for your situation.
  */
@@ -1066,7 +1067,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	prps = nvme_setup_prps(dev, &c.common, sg, length);
 
 	nvmeq = get_nvmeq(ns);
-	/* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+	/*
+	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
 	 * disabled. We may be preempted at any point, and be rescheduled
 	 * to a different CPU. That will cause cacheline bouncing, but no
 	 * additional races since q_lock already protects against other CPUs.
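
Note (illustrative, not part of the patch): the comment in the second hunk describes a real constraint in this submission path. nvme_submit_sync_cmd sleeps, so the per-CPU queue lookup must not keep preemption disabled across that call; q_lock is what actually serializes submissions if the task migrates. The sketch below shows that pattern in isolation; all example_* names are hypothetical stand-ins for the driver's get_nvmeq()/put_nvmeq()/nvme_submit_sync_cmd(), whose bodies this patch does not show.

#include <linux/spinlock.h>
#include <linux/smp.h>

/* Hypothetical stand-ins; the real driver uses struct nvme_queue and q_lock. */
struct example_queue {
	spinlock_t q_lock;		/* serializes submissions from any CPU */
	int sq_tail;
};

struct example_dev {
	struct example_queue **queues;	/* one submission queue per CPU */
};

static struct example_queue *example_get_queue(struct example_dev *dev)
{
	/* get_cpu() disables preemption while we pick this CPU's queue */
	return dev->queues[get_cpu()];
}

static void example_put_queue(struct example_queue *q)
{
	put_cpu();			/* re-enable preemption before any sleeping call */
}

/* Sleeps waiting for completion, so it must not run preemption-disabled. */
static int example_submit_sync_cmd(struct example_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->q_lock, flags);
	q->sq_tail++;			/* post the command; q_lock keeps other CPUs out */
	spin_unlock_irqrestore(&q->q_lock, flags);

	/* the real driver would wait for the completion interrupt here */
	return 0;
}

static int example_submit_io(struct example_dev *dev)
{
	struct example_queue *q = example_get_queue(dev);

	example_put_queue(q);		/* may be preempted or migrated after this */

	/*
	 * Worst case a migration costs cacheline bouncing on q; there is no
	 * extra race because q_lock already protects against other CPUs.
	 */
	return example_submit_sync_cmd(q);
}

The point, matching the comment: preemption is only disabled for the brief per-CPU queue selection, and the sleeping submission itself relies on q_lock rather than on staying pinned to one CPU.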