From e302af55ea9b51166024c21318e4922f53eb15ad Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Fri, 4 Feb 2011 16:14:30 -0500
Subject: [PATCH]

--- yaml ---
r: 286246
b: refs/heads/master
c: b1ad37efcafe396ac3944853589688dd0ec3c64e
h: refs/heads/master
v: v3
---
 [refs]                     | 2 +-
 trunk/drivers/block/nvme.c | 7 ++++++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index 99294d3813b4..62a805ceab95 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3c0cf138d7789feb3f335f6f1d24ad8fc8b3a23f
+refs/heads/master: b1ad37efcafe396ac3944853589688dd0ec3c64e
diff --git a/trunk/drivers/block/nvme.c b/trunk/drivers/block/nvme.c
index 4bfed59f3629..1c3cd6cc0ad9 100644
--- a/trunk/drivers/block/nvme.c
+++ b/trunk/drivers/block/nvme.c
@@ -842,8 +842,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	nvme_setup_prps(&c.common, sg, length);
 
 	nvmeq = get_nvmeq(ns);
-	status = nvme_submit_sync_cmd(nvmeq, &c, &result);
+	/* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+	 * disabled.  We may be preempted at any point, and be rescheduled
+	 * to a different CPU.  That will cause cacheline bouncing, but no
+	 * additional races since q_lock already protects against other CPUs.
+	 */
 	put_nvmeq(nvmeq);
+	status = nvme_submit_sync_cmd(nvmeq, &c, &result);
 
 	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
 	put_user(result, &uio->result);
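
Note on the reordering above: nvme_submit_sync_cmd() sleeps waiting for the
command to complete, so the per-CPU queue reference taken by get_nvmeq() has
to be dropped before submitting.  Below is a minimal sketch of that pattern,
assuming (this diff does not show their definitions) that the
get_nvmeq()/put_nvmeq() pair is a thin wrapper around get_cpu()/put_cpu(),
which is the usual way to pin a per-CPU lookup:

	/*
	 * Sketch only -- hypothetical definitions, not taken from this
	 * patch.  The struct layouts are illustrative assumptions.
	 */
	#include <linux/smp.h>

	struct nvme_queue;

	struct nvme_dev {
		struct nvme_queue **queues;	/* queues[0]: admin queue */
	};

	struct nvme_ns {
		struct nvme_dev *dev;
	};

	/* get_cpu() disables preemption, so the CPU index stays stable
	 * while we pick the queue belonging to this CPU. */
	static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
	{
		return ns->dev->queues[get_cpu() + 1];
	}

	/* put_cpu() re-enables preemption.  The caller may be migrated
	 * afterwards, but the queue pointer stays valid: the queue is not
	 * freed, and its q_lock serializes submissions from any CPU. */
	static void put_nvmeq(struct nvme_queue *nvmeq)
	{
		put_cpu();
	}

Under that assumption, the reordering is the whole fix: sleeping between
get_cpu() and put_cpu() would mean scheduling with preemption disabled
("scheduling while atomic"), whereas releasing the CPU before submitting
only risks the cacheline bouncing the in-code comment mentions.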