---
yaml
---
r: 320913
b: refs/heads/master
c: a06e05e
h: refs/heads/master
i:
  320911: 26464ef
v: v3
Tejun Heo authored and Jens Axboe committed Jun 25, 2012
1 parent ca81880 · commit 07eb9e2
Showing 2 changed files with 36 additions and 40 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 86072d8112595ea1b6beeb33f578e7c2839e014e
+refs/heads/master: a06e05e6afab70b4b23c0a7975aaeae24b195cd6
74 changes: 35 additions & 39 deletions trunk/block/blk-core.c
@@ -837,7 +837,7 @@ static struct io_context *rq_ioc(struct bio *bio)
 }
 
 /**
- * get_request - get a free request
+ * __get_request - get a free request
  * @q: request_queue to allocate request from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
@@ -850,8 +850,8 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns %NULL on failure, with @q->queue_lock held.
  * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-				   struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_queue *q, int rw_flags,
+				     struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq;
 	struct request_list *rl = &q->rq;
@@ -1029,56 +1029,55 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 }
 
 /**
- * get_request_wait - get a free request with retry
+ * get_request - get a free request
  * @q: request_queue to allocate request from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
  *
- * Get a free request from @q. This function keeps retrying under memory
- * pressure and fails iff @q is dead.
+ * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
+ * function keeps retrying under memory pressure and fails iff @q is dead.
  *
  * Must be callled with @q->queue_lock held and,
  * Returns %NULL on failure, with @q->queue_lock held.
  * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
-					struct bio *bio)
+static struct request *get_request(struct request_queue *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	DEFINE_WAIT(wait);
+	struct request_list *rl = &q->rq;
 	struct request *rq;
+retry:
+	rq = __get_request(q, rw_flags, bio, gfp_mask);
+	if (rq)
+		return rq;
 
-	rq = get_request(q, rw_flags, bio, GFP_NOIO);
-	while (!rq) {
-		DEFINE_WAIT(wait);
-		struct request_list *rl = &q->rq;
-
-		if (unlikely(blk_queue_dead(q)))
-			return NULL;
-
-		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
-					  TASK_UNINTERRUPTIBLE);
+	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q)))
+		return NULL;
 
-		trace_block_sleeprq(q, bio, rw_flags & 1);
+	/* wait on @rl and retry */
+	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+				  TASK_UNINTERRUPTIBLE);
 
-		spin_unlock_irq(q->queue_lock);
-		io_schedule();
+	trace_block_sleeprq(q, bio, rw_flags & 1);
 
-		/*
-		 * After sleeping, we become a "batching" process and
-		 * will be able to allocate at least one request, and
-		 * up to a big batch of them for a small period time.
-		 * See ioc_batching, ioc_set_batching
-		 */
-		create_io_context(GFP_NOIO, q->node);
-		ioc_set_batching(q, current->io_context);
+	spin_unlock_irq(q->queue_lock);
+	io_schedule();
 
-		spin_lock_irq(q->queue_lock);
-		finish_wait(&rl->wait[is_sync], &wait);
+	/*
+	 * After sleeping, we become a "batching" process and will be able
+	 * to allocate at least one request, and up to a big batch of them
+	 * for a small period time. See ioc_batching, ioc_set_batching
+	 */
+	create_io_context(GFP_NOIO, q->node);
+	ioc_set_batching(q, current->io_context);
 
-		rq = get_request(q, rw_flags, bio, GFP_NOIO);
-	};
+	spin_lock_irq(q->queue_lock);
+	finish_wait(&rl->wait[is_sync], &wait);
 
-	return rq;
+	goto retry;
 }
 
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
@@ -1088,10 +1087,7 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT)
-		rq = get_request_wait(q, rw, NULL);
-	else
-		rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, rw, NULL, gfp_mask);
 	if (!rq)
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
@@ -1481,7 +1477,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Grab a free request. This is might sleep but can not fail.
	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, rw_flags, bio);
+	req = get_request(q, rw_flags, bio, GFP_NOIO);
 	if (unlikely(!req)) {
 		bio_endio(bio, -ENODEV);	/* @q is dead */
 		goto out_unlock;
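
For context, a minimal caller-side sketch (not part of this commit) of what the merged get_request() semantics mean when reached through blk_get_request(): with a mask that includes __GFP_WAIT (e.g. GFP_NOIO) the allocation sleeps and retries under memory pressure and fails only if the queue is dead, while a non-waiting mask (e.g. GFP_ATOMIC) may return NULL immediately. The helper example_alloc_rq() below is hypothetical; blk_get_request() and the gfp flags are the existing kernel API.

#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Illustrative helper only; not from this commit. */
static struct request *example_alloc_rq(struct request_queue *q, bool can_sleep)
{
	/*
	 * GFP_NOIO includes __GFP_WAIT, so get_request() keeps retrying
	 * under memory pressure and returns NULL only if @q is dead.
	 * GFP_ATOMIC lacks __GFP_WAIT, so a failed allocation returns
	 * NULL right away instead of sleeping on the request list.
	 */
	gfp_t gfp_mask = can_sleep ? GFP_NOIO : GFP_ATOMIC;

	return blk_get_request(q, READ, gfp_mask);	/* may be NULL */
}

blk_queue_bio() above takes the same path with GFP_NOIO, which is why its allocation "might sleep but can not fail" unless @q is dead, in which case the bio is completed with -ENODEV.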
