Commit 54aa199

---
r: 44007
b: refs/heads/master
c: 0273201
h: refs/heads/master
i:
  44005: 81d2b60
  44003: c640df3
  43999: 53cb41b
v: v3
Zach Brown authored and Linus Torvalds committed Dec 10, 2006
1 parent d3e4362 commit 54aa199
Showing 2 changed files with 67 additions and 75 deletions.
[refs] (1 addition, 1 deletion)

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 17a7b1d74b1207f8f1af40b5d184989076d08f8b
+refs/heads/master: 0273201e693fd62381f6b1e85b15ffc117d8a46e
trunk/fs/direct-io.c (66 additions, 74 deletions)

@@ -121,9 +121,8 @@ struct dio {
 	int page_errors;		/* errno from get_user_pages() */
 
 	/* BIO completion state */
+	atomic_t refcount;		/* direct_io_worker() and bios */
 	spinlock_t bio_lock;		/* protects BIO fields below */
-	int bio_count;			/* nr bios to be completed */
-	int bios_in_flight;		/* nr bios in flight */
 	struct bio *bio_list;		/* singly linked via bi_private */
 	struct task_struct *waiter;	/* waiting task (NULL if none) */
 
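The hunk above is the heart of the patch: the bio_count/bios_in_flight pair becomes a single atomic reference count shared by direct_io_worker() and the in-flight bios. A minimal userspace sketch of the lifecycle this establishes, using C11 atomics as a stand-in for the kernel's atomic_t (all names below are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct dio: the submitter holds one
 * reference and each in-flight bio holds another. */
struct fake_dio {
	atomic_int refcount;
};

static void put_dio(struct fake_dio *dio)
{
	/* atomic_fetch_sub() returns the old value, so old == 1 means we
	 * dropped the last reference, the role atomic_dec_and_test()
	 * plays in the patch. */
	if (atomic_fetch_sub(&dio->refcount, 1) == 1) {
		printf("last reference dropped, freeing dio\n");
		free(dio);
	}
}

int main(void)
{
	struct fake_dio *dio = malloc(sizeof(*dio));

	/* Starts at 1 (the submitter's reference) so a fast device cannot
	 * drive the count to zero while bios are still being issued. */
	atomic_init(&dio->refcount, 1);

	for (int i = 0; i < 3; i++)
		atomic_fetch_add(&dio->refcount, 1);	/* dio_bio_submit() */
	for (int i = 0; i < 3; i++)
		put_dio(dio);				/* bio completions */

	put_dio(dio);	/* the submitter's own drop, as in direct_io_worker() */
	return 0;
}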
@@ -256,44 +255,27 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
  * Called when a BIO has been processed.  If the count goes to zero then IO is
  * complete and we can signal this to the AIO layer.
  */
-static void finished_one_bio(struct dio *dio)
+static void dio_complete_aio(struct dio *dio)
 {
 	unsigned long flags;
+	int ret;
 
-	spin_lock_irqsave(&dio->bio_lock, flags);
-	if (dio->bio_count == 1) {
-		if (dio->is_async) {
-			int ret;
-
-			/*
-			 * Last reference to the dio is going away.
-			 * Drop spinlock and complete the DIO.
-			 */
-			spin_unlock_irqrestore(&dio->bio_lock, flags);
-
-			ret = dio_complete(dio, dio->iocb->ki_pos, 0);
+	ret = dio_complete(dio, dio->iocb->ki_pos, 0);
 
-			/* Complete AIO later if falling back to buffered i/o */
-			if (dio->result == dio->size ||
-				((dio->rw == READ) && dio->result)) {
-				aio_complete(dio->iocb, ret, 0);
-				kfree(dio);
-				return;
-			} else {
-				/*
-				 * Falling back to buffered
-				 */
-				spin_lock_irqsave(&dio->bio_lock, flags);
-				dio->bio_count--;
-				if (dio->waiter)
-					wake_up_process(dio->waiter);
-				spin_unlock_irqrestore(&dio->bio_lock, flags);
-				return;
-			}
-		}
-	}
-	dio->bio_count--;
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	/* Complete AIO later if falling back to buffered i/o */
+	if (dio->result == dio->size ||
+		((dio->rw == READ) && dio->result)) {
+		aio_complete(dio->iocb, ret, 0);
+		kfree(dio);
+	} else {
+		/*
+		 * Falling back to buffered
+		 */
+		spin_lock_irqsave(&dio->bio_lock, flags);
+		if (dio->waiter)
+			wake_up_process(dio->waiter);
+		spin_unlock_irqrestore(&dio->bio_lock, flags);
+	}
 }
 
 static int dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -309,6 +291,10 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
 
 	/* cleanup the bio */
 	dio_bio_complete(dio, bio);
+
+	if (atomic_dec_and_test(&dio->refcount))
+		dio_complete_aio(dio);
+
 	return 0;
 }
 
@@ -330,8 +316,7 @@ static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
 	spin_lock_irqsave(&dio->bio_lock, flags);
 	bio->bi_private = dio->bio_list;
 	dio->bio_list = bio;
-	dio->bios_in_flight--;
-	if (dio->waiter && dio->bios_in_flight == 0)
+	if ((atomic_sub_return(1, &dio->refcount) == 1) && dio->waiter)
 		wake_up_process(dio->waiter);
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 	return 0;
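In dio_bio_end_io() above, a completing bio drops its reference with atomic_sub_return(); seeing the count land on 1 means only the waiting submitter's reference remains, so it is safe to wake the waiter. A hedged C11 analog; note that atomic_fetch_sub() returns the old value, unlike the kernel's atomic_sub_return(), hence the comparison against 2:

#include <stdatomic.h>
#include <stdbool.h>

/* Drop one reference; report whether exactly one reference (the
 * submitter's) is left afterwards.  Illustrative only. */
static bool dropped_to_last_ref(atomic_int *refcount)
{
	return atomic_fetch_sub(refcount, 1) == 2;
}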
@@ -362,17 +347,15 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
  * In the AIO read case we speculatively dirty the pages before starting IO.
  * During IO completion, any of these pages which happen to have been written
  * back will be redirtied by bio_check_pages_dirty().
+ *
+ * bios hold a dio reference between submit_bio and ->end_io.
  */
 static void dio_bio_submit(struct dio *dio)
 {
 	struct bio *bio = dio->bio;
-	unsigned long flags;
 
 	bio->bi_private = dio;
-	spin_lock_irqsave(&dio->bio_lock, flags);
-	dio->bio_count++;
-	dio->bios_in_flight++;
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	atomic_inc(&dio->refcount);
 	if (dio->is_async && dio->rw == READ)
 		bio_set_pages_dirty(bio);
 	submit_bio(dio->rw, bio);
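The ordering in dio_bio_submit() matters: the bio's reference is taken with a bare atomic_inc() before submit_bio(), because on a fast or synchronous device the completion handler can run, and drop that reference, before submit_bio() returns. A compact illustrative sketch of the rule (hypothetical names, C11 atomics):

#include <stdatomic.h>

struct fake_dio {
	atomic_int refcount;
};

/* The reference must be taken before the request is handed to the
 * device; completion may run immediately and drop it. */
static void submit_one(struct fake_dio *dio, void (*submit)(void))
{
	atomic_fetch_add(&dio->refcount, 1);	/* as in dio_bio_submit() */
	submit();				/* may complete synchronously */
}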
@@ -390,18 +373,28 @@ static void dio_cleanup(struct dio *dio)
 		page_cache_release(dio_get_page(dio));
 }
 
+static int wait_for_more_bios(struct dio *dio)
+{
+	assert_spin_locked(&dio->bio_lock);
+
+	return (atomic_read(&dio->refcount) > 1) && (dio->bio_list == NULL);
+}
+
 /*
- * Wait for the next BIO to complete.  Remove it and return it.
+ * Wait for the next BIO to complete.  Remove it and return it.  NULL is
+ * returned once all BIOs have been completed.  This must only be called
+ * once all bios have been issued so that dio->refcount can only decrease.
+ * This requires that the caller hold a reference on the dio.
  */
 static struct bio *dio_await_one(struct dio *dio)
 {
 	unsigned long flags;
-	struct bio *bio;
+	struct bio *bio = NULL;
 
 	spin_lock_irqsave(&dio->bio_lock, flags);
-	while (dio->bio_list == NULL) {
+	while (wait_for_more_bios(dio)) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (dio->bio_list == NULL) {
+		if (wait_for_more_bios(dio)) {
 			dio->waiter = current;
 			spin_unlock_irqrestore(&dio->bio_lock, flags);
 			io_schedule();
@@ -410,8 +403,10 @@ static struct bio *dio_await_one(struct dio *dio)
 		}
 		set_current_state(TASK_RUNNING);
 	}
-	bio = dio->bio_list;
-	dio->bio_list = bio->bi_private;
+	if (dio->bio_list) {
+		bio = dio->bio_list;
+		dio->bio_list = bio->bi_private;
+	}
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 	return bio;
 }
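dio_await_one() now sleeps on a predicate, wait_for_more_bios(), that is re-checked under bio_lock after the task state is set, so a wakeup cannot be lost between the check and the sleep, and it returns NULL once no more bios can arrive. A rough userspace analog of that handshake using a pthreads condition variable (illustrative only; the kernel uses task states and io_schedule(), not a condvar, and the sketch omits unlinking the bio from the list):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_dio {
	pthread_mutex_t lock;	/* plays dio->bio_lock */
	pthread_cond_t cond;	/* plays the waiter/wake_up_process() pair */
	int refcount;
	void *bio_list;
};

/* Same predicate as the patch: bios still outstanding, none queued yet. */
static bool wait_for_more_bios(struct demo_dio *dio)
{
	return dio->refcount > 1 && dio->bio_list == NULL;
}

static void *await_one(struct demo_dio *dio)
{
	void *bio = NULL;

	pthread_mutex_lock(&dio->lock);
	while (wait_for_more_bios(dio))
		pthread_cond_wait(&dio->cond, &dio->lock);  /* io_schedule() */
	if (dio->bio_list)
		bio = dio->bio_list;	/* NULL once all bios completed */
	pthread_mutex_unlock(&dio->lock);
	return bio;
}

The NULL return is what lets the new do/while loop in dio_await_completion() below terminate without the old unlocked read of bio_count.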
@@ -440,25 +435,24 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 		}
 		bio_put(bio);
 	}
-	finished_one_bio(dio);
 	return uptodate ? 0 : -EIO;
 }
 
 /*
- * Wait on and process all in-flight BIOs.
+ * Wait on and process all in-flight BIOs.  This must only be called once
+ * all bios have been issued so that the refcount can only decrease.  This
+ * just waits for all bios to make it through dio_bio_complete.  IO errors
+ * are propagated through dio->io_error and should be propagated via
+ * dio_complete().
  */
 static void dio_await_completion(struct dio *dio)
 {
-	/*
-	 * The bio_lock is not held for the read of bio_count.
-	 * This is ok since it is the dio_bio_complete() that changes
-	 * bio_count.
-	 */
-	while (dio->bio_count) {
-		struct bio *bio = dio_await_one(dio);
-		/* io errors are propogated through dio->io_error */
-		dio_bio_complete(dio, bio);
-	}
+	struct bio *bio;
+	do {
+		bio = dio_await_one(dio);
+		if (bio)
+			dio_bio_complete(dio, bio);
+	} while (bio);
 }
 
 /*
@@ -995,16 +989,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	dio->iocb = iocb;
 	dio->i_size = i_size_read(inode);
 
-	/*
-	 * BIO completion state.
-	 *
-	 * ->bio_count starts out at one, and we decrement it to zero after all
-	 * BIOs are submitted.  This to avoid the situation where a really fast
-	 * (or synchronous) device could take the count to zero while we're
-	 * still submitting BIOs.
-	 */
-	dio->bio_count = 1;
-	dio->bios_in_flight = 0;
+	atomic_set(&dio->refcount, 1);
 	spin_lock_init(&dio->bio_lock);
 	dio->bio_list = NULL;
 	dio->waiter = NULL;
@@ -1111,7 +1096,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 		}
 		if (ret == 0)
 			ret = dio->result;
-		finished_one_bio(dio);		/* This can free the dio */
+
+		/* this can free the dio */
+		if (atomic_dec_and_test(&dio->refcount))
+			dio_complete_aio(dio);
+
 		if (should_wait) {
 			unsigned long flags;
 			/*
@@ -1122,7 +1111,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 
 			spin_lock_irqsave(&dio->bio_lock, flags);
 			set_current_state(TASK_UNINTERRUPTIBLE);
-			while (dio->bio_count) {
+			while (atomic_read(&dio->refcount)) {
 				spin_unlock_irqrestore(&dio->bio_lock, flags);
 				io_schedule();
 				spin_lock_irqsave(&dio->bio_lock, flags);
@@ -1133,7 +1122,6 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 			kfree(dio);
 		}
 	} else {
-		finished_one_bio(dio);
 		dio_await_completion(dio);
 
 		ret = dio_complete(dio, offset, ret);
@@ -1146,7 +1134,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 			 * i/o, we have to mark the aio complete.
 			 */
 			aio_complete(iocb, ret, 0);
-		kfree(dio);
+
+		if (atomic_dec_and_test(&dio->refcount))
+			kfree(dio);
+		else
+			BUG();
 	}
 	return ret;
 }
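The final hunk encodes an invariant rather than a behavior: by this point every bio has completed and dropped its reference, so the submitter's closing atomic_dec_and_test() must take the count to zero, and the BUG() turns a violated invariant into a loud failure instead of a silent leak. A C11 analog of that final put (illustrative; assert() standing in for BUG()):

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct fake_dio {
	atomic_int refcount;
};

/* The caller must hold the only remaining reference, so the value
 * observed before the decrement has to be exactly 1. */
static void final_put(struct fake_dio *dio)
{
	int old = atomic_fetch_sub(&dio->refcount, 1);
	assert(old == 1);	/* plays the role of the BUG() above */
	free(dio);
}

int main(void)
{
	struct fake_dio *dio = malloc(sizeof(*dio));
	atomic_init(&dio->refcount, 1);
	final_put(dio);
	return 0;
}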