Commit 3ffa3c0
aio: now fput() is OK from interrupt context; get rid of manual delayed __fput()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro committed Jul 22, 2012
1 parent 4a9d4b0 commit 3ffa3c0
Showing 1 changed file with 3 additions and 70 deletions.
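The rationale: before the parent commit (4a9d4b0), dropping the last reference to a struct file from interrupt context was not allowed, because the final __fput() can sleep, so aio carried its own machinery to defer that case. The parent commit moved the deferral into fput() itself, which makes every caller-side workaround in the diff below dead code. Roughly, fput() after the parent commit takes the following shape (a paraphrase from memory, not the verbatim fs/file_table.c source):

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		unsigned long flags;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			/* common case: run __fput() on the way back to userspace */
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
		}
		/* IRQ or kernel-thread context: punt to a global work item */
		spin_lock_irqsave(&delayed_fput_lock, flags);
		list_add(&file->f_u.fu_list, &delayed_fput_list);
		schedule_work(&delayed_fput_work);
		spin_unlock_irqrestore(&delayed_fput_lock, flags);
	}
}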
fs/aio.c (3 additions, 70 deletions)
@@ -56,13 +56,6 @@ static struct kmem_cache *kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
-/* Used for rare fput completion. */
-static void aio_fput_routine(struct work_struct *);
-static DECLARE_WORK(fput_work, aio_fput_routine);
-
-static DEFINE_SPINLOCK(fput_lock);
-static LIST_HEAD(fput_head);
-
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
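For reference, the declarations deleted above set up a standard deferred-cleanup pattern: IRQ-context producers park objects on a spinlock-protected list and kick a work item, whose handler drains the list in process context, where the cleanup is allowed to sleep. A minimal self-contained sketch of that pattern (hypothetical names, not the aio code):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct deferred_item {
	struct list_head list;
};

static void drain_routine(struct work_struct *);
static DECLARE_WORK(drain_work, drain_routine);
static DEFINE_SPINLOCK(drain_lock);
static LIST_HEAD(drain_head);

static void drain_routine(struct work_struct *unused)
{
	spin_lock_irq(&drain_lock);
	while (!list_empty(&drain_head)) {
		struct deferred_item *it =
			list_first_entry(&drain_head, struct deferred_item, list);

		list_del(&it->list);
		spin_unlock_irq(&drain_lock);
		/* ... sleeping cleanup (e.g. the final fput()) goes here ... */
		spin_lock_irq(&drain_lock);
	}
	spin_unlock_irq(&drain_lock);
}

/* Producer side, safe from interrupt context: */
static void defer_cleanup(struct deferred_item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&drain_lock, flags);
	list_add(&it->list, &drain_head);
	spin_unlock_irqrestore(&drain_lock, flags);
	schedule_work(&drain_work);
}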
@@ -479,7 +472,6 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 {
 	unsigned short allocated, to_alloc;
 	long avail;
-	bool called_fput = false;
 	struct kiocb *req, *n;
 	struct aio_ring *ring;
 
@@ -495,28 +487,11 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 	if (allocated == 0)
 		goto out;
 
-retry:
 	spin_lock_irq(&ctx->ctx_lock);
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
 
 	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
 	BUG_ON(avail < 0);
-	if (avail == 0 && !called_fput) {
-		/*
-		 * Handle a potential starvation case. It is possible that
-		 * we hold the last reference on a struct file, causing us
-		 * to delay the final fput to non-irq context. In this case,
-		 * ctx->reqs_active is artificially high. Calling the fput
-		 * routine here may free up a slot in the event completion
-		 * ring, allowing this allocation to succeed.
-		 */
-		kunmap_atomic(ring);
-		spin_unlock_irq(&ctx->ctx_lock);
-		aio_fput_routine(NULL);
-		called_fput = true;
-		goto retry;
-	}
-
 	if (avail < allocated) {
 		/* Trim back the number of requests. */
 		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
@@ -570,36 +545,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 	wake_up_all(&ctx->wait);
 }
 
-static void aio_fput_routine(struct work_struct *data)
-{
-	spin_lock_irq(&fput_lock);
-	while (likely(!list_empty(&fput_head))) {
-		struct kiocb *req = list_kiocb(fput_head.next);
-		struct kioctx *ctx = req->ki_ctx;
-
-		list_del(&req->ki_list);
-		spin_unlock_irq(&fput_lock);
-
-		/* Complete the fput(s) */
-		if (req->ki_filp != NULL)
-			fput(req->ki_filp);
-
-		/* Link the iocb into the context's free list */
-		rcu_read_lock();
-		spin_lock_irq(&ctx->ctx_lock);
-		really_put_req(ctx, req);
-		/*
-		 * at that point ctx might've been killed, but actual
-		 * freeing is RCU'd
-		 */
-		spin_unlock_irq(&ctx->ctx_lock);
-		rcu_read_unlock();
-
-		spin_lock_irq(&fput_lock);
-	}
-	spin_unlock_irq(&fput_lock);
-}
-
 /* __aio_put_req
  * Returns true if this put was the last user of the request.
  */
@@ -618,21 +563,9 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
-	/*
-	 * Try to optimize the aio and eventfd file* puts, by avoiding to
-	 * schedule work in case it is not final fput() time. In normal cases,
-	 * we would not be holding the last reference to the file*, so
-	 * this function will be executed w/out any aio kthread wakeup.
-	 */
-	if (unlikely(!fput_atomic(req->ki_filp))) {
-		spin_lock(&fput_lock);
-		list_add(&req->ki_list, &fput_head);
-		spin_unlock(&fput_lock);
-		schedule_work(&fput_work);
-	} else {
-		req->ki_filp = NULL;
-		really_put_req(ctx, req);
-	}
+	fput(req->ki_filp);
+	req->ki_filp = NULL;
+	really_put_req(ctx, req);
 	return 1;
 }
 
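The deleted branch keyed off fput_atomic(), a helper (itself since removed from the kernel) that dropped a reference only when it was not the last one, letting atomic-context callers detect the "final fput" case and defer it. Roughly, from memory:

/* Sketch of the old fput_atomic() helper: succeed (return non-zero)
 * only if this is not the last reference, i.e. the count can be
 * decremented without hitting zero. */
int fput_atomic(struct file *file)
{
	return atomic_long_add_unless(&file->f_count, -1, 1);
}

With fput() now deferring the final __fput() internally, __aio_put_req() can call it unconditionally, and the three added lines replace the whole two-armed branch.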
