aio: reqs_active -> reqs_available
The number of outstanding kiocbs is one of the few shared things left that
has to be touched for every kiocb - it'd be nice to make it percpu.

We can make it per cpu by treating it like an allocation problem: we have
a maximum number of kiocbs that can be outstanding (i.e.  slots) - then we
just allocate and free slots, and we know how to write per cpu allocators.

So as prep work for that, we convert reqs_active to reqs_available.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
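
The pattern the patch moves to is easier to see in isolation: keep a counter of free ring slots, claim one with a decrement that only succeeds while the counter is positive when a request is set up, and add slots back once their io_events have been consumed. Below is a minimal userspace sketch of that idea using C11 atomics; the names (slot_counter, get_slot, put_slots) are made up for illustration, and the compare-and-swap loop merely stands in for the kernel's atomic_dec_if_positive()/atomic_add() operating on ctx->reqs_available.

/* Illustrative userspace sketch of the "available slots" pattern; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct slot_counter {
        atomic_int available;   /* free slots left in the ring, like reqs_available */
};

/* Initialize with one slot held back, mirroring the nr_events - 1 set in ioctx_alloc(). */
static void counter_init(struct slot_counter *c, int nr_events)
{
        atomic_init(&c->available, nr_events - 1);
}

/* Claim a slot: decrement only while the counter is still positive
 * (the same idea as the kernel's atomic_dec_if_positive()). */
static bool get_slot(struct slot_counter *c)
{
        int cur = atomic_load(&c->available);

        while (cur > 0) {
                if (atomic_compare_exchange_weak(&c->available, &cur, cur - 1))
                        return true;    /* slot reserved for this request */
        }
        return false;                   /* ring is full, caller must back off */
}

/* Return slots once their events have been pulled off the ring. */
static void put_slots(struct slot_counter *c, int n)
{
        atomic_fetch_add(&c->available, n);
}

int main(void)
{
        struct slot_counter c;

        counter_init(&c, 128);          /* ring with 128 events, 127 usable */
        if (get_slot(&c))
                printf("slot claimed, %d left\n", atomic_load(&c.available));
        put_slots(&c, 1);               /* event consumed, slot becomes free again */
        return 0;
}

Expressed this way, the counter behaves like an allocator of slots, which is what makes the percpu conversion anticipated in the commit message straightforward to layer on top later.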
Kent Overstreet authored and Benjamin LaHaise committed Jul 30, 2013
1 parent 0c45355 commit 34e83fc
Showing 1 changed file with 22 additions and 16 deletions.
fs/aio.c (22 additions, 16 deletions)
@@ -94,7 +94,13 @@ struct kioctx {
         struct work_struct rcu_work;
 
         struct {
-                atomic_t reqs_active;
+                /*
+                 * This counts the number of available slots in the ringbuffer,
+                 * so we avoid overflowing it: it's decremented (if positive)
+                 * when allocating a kiocb and incremented when the resulting
+                 * io_event is pulled off the ringbuffer.
+                 */
+                atomic_t reqs_available;
         } ____cacheline_aligned_in_smp;
 
         struct {
@@ -404,19 +410,20 @@ static void free_ioctx(struct kioctx *ctx)
         head = ring->head;
         kunmap_atomic(ring);
 
-        while (atomic_read(&ctx->reqs_active) > 0) {
+        while (atomic_read(&ctx->reqs_available) < ctx->nr_events - 1) {
                 wait_event(ctx->wait,
-                           head != ctx->tail ||
-                           atomic_read(&ctx->reqs_active) <= 0);
+                           (head != ctx->tail) ||
+                           (atomic_read(&ctx->reqs_available) >=
+                            ctx->nr_events - 1));
 
                 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
 
-                atomic_sub(avail, &ctx->reqs_active);
+                atomic_add(avail, &ctx->reqs_available);
                 head += avail;
                 head %= ctx->nr_events;
         }
 
-        WARN_ON(atomic_read(&ctx->reqs_active) < 0);
+        WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
 
         aio_free_ring(ctx);
 
@@ -475,6 +482,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
         if (aio_setup_ring(ctx) < 0)
                 goto out_freectx;
 
+        atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
+
         /* limit the number of system wide aios */
         spin_lock(&aio_nr_lock);
         if (aio_nr + nr_events > aio_max_nr ||
@@ -586,7 +595,7 @@ void exit_aio(struct mm_struct *mm)
                                 "exit_aio:ioctx still alive: %d %d %d\n",
                                 atomic_read(&ctx->users),
                                 atomic_read(&ctx->dead),
-                                atomic_read(&ctx->reqs_active));
+                                atomic_read(&ctx->reqs_available));
                 /*
                  * We don't need to bother with munmap() here -
                  * exit_mmap(mm) is coming and it'll unmap everything.
@@ -615,12 +624,9 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
         struct kiocb *req;
 
-        if (atomic_read(&ctx->reqs_active) >= ctx->nr_events)
+        if (atomic_dec_if_positive(&ctx->reqs_available) <= 0)
                 return NULL;
 
-        if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1)
-                goto out_put;
-
         req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
         if (unlikely(!req))
                 goto out_put;
@@ -630,7 +636,7 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 
         return req;
 out_put:
-        atomic_dec(&ctx->reqs_active);
+        atomic_inc(&ctx->reqs_available);
         return NULL;
 }
 
@@ -701,7 +707,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
         /*
          * Take rcu_read_lock() in case the kioctx is being destroyed, as we
-         * need to issue a wakeup after decrementing reqs_active.
+         * need to issue a wakeup after incrementing reqs_available.
          */
         rcu_read_lock();
 
@@ -719,7 +725,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
          */
         if (unlikely(xchg(&iocb->ki_cancel,
                           KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
-                atomic_dec(&ctx->reqs_active);
+                atomic_inc(&ctx->reqs_available);
                 /* Still need the wake_up in case free_ioctx is waiting */
                 goto put_rq;
         }
@@ -857,7 +863,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 
         pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
 
-        atomic_sub(ret, &ctx->reqs_active);
+        atomic_add(ret, &ctx->reqs_available);
 out:
         mutex_unlock(&ctx->ring_lock);
 
@@ -1241,7 +1247,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
         aio_put_req(req);       /* drop extra ref to req */
         return 0;
 out_put_req:
-        atomic_dec(&ctx->reqs_active);
+        atomic_inc(&ctx->reqs_available);
         aio_put_req(req);       /* drop extra ref to req */
         aio_put_req(req);       /* drop i/o ref to req */
         return ret;
