From 2294ad9112ccc625e7f6a271a7b70fac928ba0ab Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Fri, 25 Feb 2011 14:44:27 -0800
Subject: [PATCH]

--- yaml ---
r: 233700
b: refs/heads/master
c: 7137c6bd455234bcb7560fd829e6ee49cae5fed6
h: refs/heads/master
v: v3
---
 [refs]         |  2 +-
 trunk/fs/aio.c | 17 +++++++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index f0bda59d0bf7..b9bd9cea55ba 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3bd9a5d734c7cc7533b27abf451416c7f50095a7
+refs/heads/master: 7137c6bd455234bcb7560fd829e6ee49cae5fed6

diff --git a/trunk/fs/aio.c b/trunk/fs/aio.c
index b4dd668fbccc..26869cde3953 100644
--- a/trunk/fs/aio.c
+++ b/trunk/fs/aio.c
@@ -1642,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		goto out_put_req;
 
 	spin_lock_irq(&ctx->ctx_lock);
+	/*
+	 * We could have raced with io_destroy() and are currently holding a
+	 * reference to ctx which should be destroyed. We cannot submit IO
+	 * since ctx gets freed as soon as io_submit() puts its reference. The
+	 * check here is reliable: io_destroy() sets ctx->dead before waiting
+	 * for outstanding IO and the barrier between these two is realized by
+	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
+	 * increment ctx->reqs_active before checking for ctx->dead and the
+	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
+	 * don't see ctx->dead set here, io_destroy() waits for our IO to
+	 * finish.
+	 */
+	if (ctx->dead) {
+		spin_unlock_irq(&ctx->ctx_lock);
+		ret = -EINVAL;
+		goto out_put_req;
+	}
 	aio_run_iocb(req);
 	if (!list_empty(&ctx->run_list)) {
 		/* drain the run list */
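
The in-code comment above argues for correctness from lock ordering rather than explicit
memory barriers: the submitter makes itself visible (incrementing ctx->reqs_active) before
testing ctx->dead, while io_destroy() sets ctx->dead before waiting for outstanding requests,
so at least one side is guaranteed to observe the other. The user-space sketch below is not
part of the patch; it is a minimal illustration of that handshake under my own assumptions,
with hypothetical names (demo_ctx, demo_submit, demo_destroy) and the kernel's two locks
collapsed into a single pthread mutex for brevity.

/* Illustrative sketch only -- not kernel code, all names are hypothetical. */
#include <pthread.h>
#include <stdbool.h>

struct demo_ctx {                       /* stand-in for struct kioctx          */
	pthread_mutex_t lock;           /* plays the role of ctx->ctx_lock     */
	pthread_cond_t  drained;
	bool            dead;           /* plays the role of ctx->dead         */
	int             reqs_active;    /* plays the role of ctx->reqs_active  */
};

/* Submission side: become visible first, then check for a concurrent destroy. */
static int demo_submit(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->reqs_active++;             /* visible before the dead check        */
	if (ctx->dead) {                /* destroyer already ran: back out      */
		ctx->reqs_active--;
		if (ctx->reqs_active == 0)
			pthread_cond_signal(&ctx->drained);
		pthread_mutex_unlock(&ctx->lock);
		return -1;              /* the patch returns -EINVAL here       */
	}
	/* ... "run the iocb" would go here ... */
	ctx->reqs_active--;
	if (ctx->reqs_active == 0)
		pthread_cond_signal(&ctx->drained);
	pthread_mutex_unlock(&ctx->lock);
	return 0;
}

/* Destruction side: mark dead first, then wait for in-flight submissions. */
static void demo_destroy(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->dead = true;               /* set before waiting for outstanding IO */
	while (ctx->reqs_active > 0)
		pthread_cond_wait(&ctx->drained, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
	/* only now is it safe to tear ctx down: no submitter still uses it     */
}

int main(void)
{
	struct demo_ctx ctx = {
		.lock        = PTHREAD_MUTEX_INITIALIZER,
		.drained     = PTHREAD_COND_INITIALIZER,
		.dead        = false,
		.reqs_active = 0,
	};

	demo_submit(&ctx);              /* succeeds: ctx not dead yet            */
	demo_destroy(&ctx);             /* marks dead, nothing in flight to wait */
	return demo_submit(&ctx) == -1 ? 0 : 1;   /* refused after destroy       */
}

Because the destroyer either sees reqs_active raised (and waits) or the submitter sees dead
set (and bails out), the sketch never tears the context down under a running submitter, which
mirrors why the patch can safely return -EINVAL instead of touching a kioctx that io_destroy()
is about to free.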