Skip to content

Commit

Permalink
NFS: Add functionality to allow waiting on all outstanding reads to complete
Browse files Browse the repository at this point in the history

This will later allow NFS locking code to wait for readahead to complete
before releasing byte range locks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
  • Loading branch information
Trond Myklebust authored and Trond Myklebust committed Apr 9, 2013
1 parent bc7a05c commit 577b423
Show file tree
Hide file tree
Showing 4 changed files with 66 additions and 0 deletions.
1 change: 1 addition & 0 deletions fs/nfs/inode.c
Original file line number Diff line number Diff line change
Expand Up @@ -561,6 +561,7 @@ static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
l_ctx->lockowner.l_owner = current->files;
l_ctx->lockowner.l_pid = current->tgid;
INIT_LIST_HEAD(&l_ctx->list);
nfs_iocounter_init(&l_ctx->io_count);
}

static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
Expand Down
7 changes: 7 additions & 0 deletions fs/nfs/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -229,6 +229,13 @@ extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr,
void (*release)(struct nfs_pgio_header *hdr));
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
int nfs_iocounter_wait(struct nfs_io_counter *c);

/* Reset @c to its idle state: zero outstanding I/O requests, no flags set. */
static inline void nfs_iocounter_init(struct nfs_io_counter *c)
{
	atomic_set(&c->io_count, 0);
	c->flags = 0;
}

/* nfs2xdr.c */
extern struct rpc_procinfo nfs_procedures[];
Expand Down
51 changes: 51 additions & 0 deletions fs/nfs/pagelist.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,55 @@ nfs_page_free(struct nfs_page *p)
kmem_cache_free(nfs_page_cachep, p);
}

/* Account one more in-flight request against @c. */
static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

/*
 * Drop one in-flight request from @c.  When the count reaches zero,
 * clear NFS_IO_INPROGRESS and wake anyone sleeping on that bit in
 * __nfs_iocounter_wait().  The barrier after clear_bit() orders the
 * flag update before the waitqueue check inside wake_up_bit(), so a
 * waiter cannot miss the wakeup.
 */
static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

/*
 * Slow path of nfs_iocounter_wait(): sleep until @c->io_count drops to
 * zero.
 *
 * NFS_IO_INPROGRESS is set before each re-check of io_count so that
 * nfs_iocounter_dec() sees a waiter is present and issues wake_up_bit()
 * when the count hits zero.  The wait is TASK_KILLABLE: a fatal signal
 * ends the loop early with the error returned by nfs_wait_bit_killable().
 *
 * Returns 0 once io_count is zero, or the nonzero result of
 * nfs_wait_bit_killable() if interrupted.
 */
static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		/* Advertise a waiter before re-reading the counter. */
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	/* Take the slow path only if requests are still outstanding. */
	if (atomic_read(&c->io_count) != 0)
		return __nfs_iocounter_wait(c);
	return 0;
}

/**
* nfs_create_request - Create an NFS read/write request.
* @ctx: open context to use
Expand Down Expand Up @@ -118,6 +167,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
return ERR_CAST(l_ctx);
}
req->wb_lock_context = l_ctx;
nfs_iocounter_inc(&l_ctx->io_count);

/* Initialize the request struct. Initially, we assume a
* long write-back delay. This will be adjusted in
Expand Down Expand Up @@ -177,6 +227,7 @@ static void nfs_clear_request(struct nfs_page *req)
req->wb_page = NULL;
}
if (l_ctx != NULL) {
nfs_iocounter_dec(&l_ctx->io_count);
nfs_put_lock_context(l_ctx);
req->wb_lock_context = NULL;
}
Expand Down
7 changes: 7 additions & 0 deletions include/linux/nfs_fs.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,11 +59,18 @@ struct nfs_lockowner {
pid_t l_pid;
};

/* Bit in nfs_io_counter.flags: set by a waiter in __nfs_iocounter_wait(),
 * cleared (with a wakeup) by nfs_iocounter_dec() when io_count reaches 0. */
#define NFS_IO_INPROGRESS 0
struct nfs_io_counter {
	unsigned long flags;	/* NFS_IO_INPROGRESS wait bit */
	atomic_t io_count;	/* number of in-flight requests */
};

struct nfs_lock_context {
	atomic_t count;
	struct list_head list;
	struct nfs_open_context *open_context;
	struct nfs_lockowner lockowner;
	/* Counts outstanding read/write requests created against this lock
	 * context (incremented in nfs_create_request(), decremented in
	 * nfs_clear_request()); waited on via nfs_iocounter_wait(). */
	struct nfs_io_counter io_count;
};

struct nfs4_state;
Expand Down

0 comments on commit 577b423

Please sign in to comment.