Commit d93f28b

---
r: 320914
b: refs/heads/master
c: 7f4b35d
h: refs/heads/master
v: v3

Tejun Heo authored and Jens Axboe committed Jun 25, 2012
1 parent 07eb9e2 commit d93f28b

Showing 3 changed files with 16 additions and 31 deletions.

[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a06e05e6afab70b4b23c0a7975aaeae24b195cd6
+refs/heads/master: 7f4b35d155a5f9e5748539a79558533aa08d6a81

trunk/block/blk-core.c (42 changes: 15 additions & 27 deletions)
@@ -855,15 +855,11 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 {
 	struct request *rq;
 	struct request_list *rl = &q->rq;
-	struct elevator_type *et;
-	struct io_context *ioc;
+	struct elevator_type *et = q->elevator->type;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	bool retried = false;
 	int may_queue;
-retry:
-	et = q->elevator->type;
-	ioc = rq_ioc(bio);
 
 	if (unlikely(blk_queue_dead(q)))
 		return NULL;
@@ -874,20 +870,6 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 
 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[is_sync]+1 >= q->nr_requests) {
-			/*
-			 * We want ioc to record batching state. If it's
-			 * not already there, creating a new one requires
-			 * dropping queue_lock, which in turn requires
-			 * retesting conditions to avoid queue hang.
-			 */
-			if (!ioc && !retried) {
-				spin_unlock_irq(q->queue_lock);
-				create_io_context(gfp_mask, q->node);
-				spin_lock_irq(q->queue_lock);
-				retried = true;
-				goto retry;
-			}
-
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -955,12 +937,8 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	/* init elvpriv */
 	if (rw_flags & REQ_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
-			create_io_context(gfp_mask, q->node);
-			ioc = rq_ioc(bio);
-			if (!ioc)
-				goto fail_elvpriv;
-
-			icq = ioc_create_icq(ioc, q, gfp_mask);
+			if (ioc)
+				icq = ioc_create_icq(ioc, q, gfp_mask);
 			if (!icq)
 				goto fail_elvpriv;
 		}
@@ -1071,7 +1049,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * to allocate at least one request, and up to a big batch of them
 	 * for a small period time. See ioc_batching, ioc_set_batching
 	 */
-	create_io_context(GFP_NOIO, q->node);
 	ioc_set_batching(q, current->io_context);
 
 	spin_lock_irq(q->queue_lock);
@@ -1086,6 +1063,9 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 
 	BUG_ON(rw != READ && rw != WRITE);
 
+	/* create ioc upfront */
+	create_io_context(gfp_mask, q->node);
+
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, rw, NULL, gfp_mask);
 	if (!rq)
@@ -1698,6 +1678,14 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	/*
+	 * Various block parts want %current->io_context and lazy ioc
+	 * allocation ends up trading a lot of pain for a small amount of
+	 * memory. Just allocate it upfront. This may fail and block
+	 * layer knows how to live with it.
+	 */
+	create_io_context(GFP_ATOMIC, q->node);
+
 	if (blk_throtl_bio(q, bio))
 		return false;	/* throttled, will be resubmitted later */
 
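
The blk-core.c hunks above replace the old pattern of dropping queue_lock, creating an io_context, re-locking and retrying with a single upfront allocation whose failure the request path simply tolerates. The standalone C sketch below is not part of the commit; it only models that control-flow change with made-up names (io_ctx, alloc_io_ctx, consume_request_*) so the before/after difference is easier to see.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Stand-in for struct io_context; only presence/absence matters here. */
struct io_ctx {
	int nr_batched;
};

/* Best-effort allocation that may fail, like create_io_context(). */
static struct io_ctx *alloc_io_ctx(void)
{
	return calloc(1, sizeof(struct io_ctx));
}

/* Old shape: allocate lazily inside the locked section, which forces an
 * unlock/alloc/relock/retry cycle whenever the context is missing. */
static void consume_request_lazy(void)
{
	struct io_ctx *ioc = NULL;
	bool retried = false;

retry:
	/* lock(queue_lock); */
	if (!ioc && !retried) {
		/* unlock(queue_lock); */
		ioc = alloc_io_ctx();
		/* lock(queue_lock); ...and every condition must be rechecked */
		retried = true;
		goto retry;
	}
	if (ioc)
		ioc->nr_batched++;
	/* unlock(queue_lock); */
	free(ioc);
}

/* New shape: allocate once before taking the lock; downstream code just
 * skips the optional bookkeeping when the pointer is NULL. */
static void consume_request_upfront(void)
{
	struct io_ctx *ioc = alloc_io_ctx();	/* may legitimately fail */

	/* lock(queue_lock); */
	if (ioc)
		ioc->nr_batched++;		/* no unlock/retry dance */
	/* unlock(queue_lock); */
	free(ioc);
}

int main(void)
{
	consume_request_lazy();
	consume_request_upfront();
	puts("both request paths completed");
	return 0;
}

The point of the new shape is that nothing under queue_lock ever needs to allocate, so the retry label, the retried flag and the condition re-checks all disappear.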

trunk/block/blk-throttle.c (3 changes: 0 additions & 3 deletions)
@@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		goto out;
 	}
 
-	/* bio_associate_current() needs ioc, try creating */
-	create_io_context(GFP_ATOMIC, q->node);
-
 	/*
 	 * A throtl_grp pointer retrieved under rcu can be used to access
 	 * basic fields like stats and io rates. If a group has no rules,
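
The blk-throttle.c deletion is safe because generic_make_request_checks() now issues the create_io_context() call itself before blk_throtl_bio() ever runs (see the @@ -1698 hunk above), so the throttle path can simply use whatever context is present. Below is a rough, hypothetical userspace model of that ordering; the names are invented and only the call order mirrors the commit.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for current->io_context (per-task in the real kernel). */
static void *task_io_ctx;

/* Models create_io_context(): best effort, may leave the pointer NULL. */
static void create_ctx_upfront(void)
{
	if (!task_io_ctx)
		task_io_ctx = malloc(16);
}

/* Models blk_throtl_bio() after this commit: it no longer allocates,
 * it only uses the context when the entry path managed to create one. */
static void throttle_bio(void)
{
	if (task_io_ctx)
		puts("throttle bookkeeping attached to the io context");
	else
		puts("no io context; throttling proceeds without it");
}

/* Models generic_make_request_checks(): allocate first, then throttle. */
static void make_request_checks(void)
{
	create_ctx_upfront();
	throttle_bio();
}

int main(void)
{
	make_request_checks();
	free(task_io_ctx);
	return 0;
}

If the GFP_ATOMIC allocation fails, the pointer stays NULL and the bookkeeping that wanted it is skipped, which is exactly the "block layer knows how to live with it" case the new comment describes.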
