Skip to content

Commit

Permalink
aio: Don't use ctx->tail unnecessarily
Browse files Browse the repository at this point in the history
aio_complete() (arguably) needs to keep its own trusted copy of the tail
pointer, but io_getevents() doesn't have to use it - it's already using
the head pointer from the ring buffer.

Convert it to use the tail from the ring buffer instead, so that it touches
fewer cachelines and doesn't contend with the cacheline aio_complete() needs.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
  • Loading branch information
Kent Overstreet authored and Benjamin LaHaise committed Jul 30, 2013
1 parent bec68fa commit 5ffac12
Showing 1 changed file with 23 additions and 19 deletions.
42 changes: 23 additions & 19 deletions fs/aio.c
Original file line number Diff line number Diff line change
Expand Up @@ -406,7 +406,8 @@ static void free_ioctx(struct work_struct *work)
struct kioctx *ctx = container_of(work, struct kioctx, free_work);
struct aio_ring *ring;
struct kiocb *req;
unsigned cpu, head, avail;
unsigned cpu, avail;
DEFINE_WAIT(wait);

spin_lock_irq(&ctx->ctx_lock);

Expand All @@ -427,22 +428,24 @@ static void free_ioctx(struct work_struct *work)
kcpu->reqs_available = 0;
}

ring = kmap_atomic(ctx->ring_pages[0]);
head = ring->head;
kunmap_atomic(ring);
while (1) {
prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);

while (atomic_read(&ctx->reqs_available) < ctx->nr_events - 1) {
wait_event(ctx->wait,
(head != ctx->tail) ||
(atomic_read(&ctx->reqs_available) >=
ctx->nr_events - 1));

avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
ring = kmap_atomic(ctx->ring_pages[0]);
avail = (ring->head <= ring->tail)
? ring->tail - ring->head
: ctx->nr_events - ring->head + ring->tail;

atomic_add(avail, &ctx->reqs_available);
head += avail;
head %= ctx->nr_events;
ring->head = ring->tail;
kunmap_atomic(ring);

if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
break;

schedule();
}
finish_wait(&ctx->wait, &wait);

WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);

Expand Down Expand Up @@ -869,28 +872,29 @@ static long aio_read_events_ring(struct kioctx *ctx,
struct io_event __user *event, long nr)
{
struct aio_ring *ring;
unsigned head, pos;
unsigned head, tail, pos;
long ret = 0;
int copy_ret;

mutex_lock(&ctx->ring_lock);

ring = kmap_atomic(ctx->ring_pages[0]);
head = ring->head;
tail = ring->tail;
kunmap_atomic(ring);

pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);
pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

if (head == ctx->tail)
if (head == tail)
goto out;

while (ret < nr) {
long avail;
struct io_event *ev;
struct page *page;

avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
if (head == ctx->tail)
avail = (head <= tail ? tail : ctx->nr_events) - head;
if (head == tail)
break;

avail = min(avail, nr - ret);
Expand Down Expand Up @@ -921,7 +925,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
kunmap_atomic(ring);
flush_dcache_page(ctx->ring_pages[0]);

pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
pr_debug("%li h%u t%u\n", ret, head, tail);

put_reqs_available(ctx, ret);
out:
Expand Down

0 comments on commit 5ffac12

Please sign in to comment.