From d93f28b5423c37d4a884d8c10191c4d738d2b45a Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Mon, 4 Jun 2012 20:40:56 -0700
Subject: [PATCH]
--- yaml ---
r: 320914
b: refs/heads/master
c: 7f4b35d155a5f9e5748539a79558533aa08d6a81
h: refs/heads/master
v: v3
---
 [refs]                     |  2 +-
 trunk/block/blk-core.c     | 42 ++++++++++++++------------------------
 trunk/block/blk-throttle.c |  3 ---
 3 files changed, 16 insertions(+), 31 deletions(-)

diff --git a/[refs] b/[refs]
index d5f6521b7a99..d01b5247d411 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a06e05e6afab70b4b23c0a7975aaeae24b195cd6
+refs/heads/master: 7f4b35d155a5f9e5748539a79558533aa08d6a81
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 080204a10fcf..71894e143b91 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -855,15 +855,11 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 {
 	struct request *rq;
 	struct request_list *rl = &q->rq;
-	struct elevator_type *et;
-	struct io_context *ioc;
+	struct elevator_type *et = q->elevator->type;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	bool retried = false;
 	int may_queue;
-retry:
-	et = q->elevator->type;
-	ioc = rq_ioc(bio);
 
 	if (unlikely(blk_queue_dead(q)))
 		return NULL;
@@ -874,20 +870,6 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 
 	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[is_sync]+1 >= q->nr_requests) {
-			/*
-			 * We want ioc to record batching state. If it's
-			 * not already there, creating a new one requires
-			 * dropping queue_lock, which in turn requires
-			 * retesting conditions to avoid queue hang.
-			 */
-			if (!ioc && !retried) {
-				spin_unlock_irq(q->queue_lock);
-				create_io_context(gfp_mask, q->node);
-				spin_lock_irq(q->queue_lock);
-				retried = true;
-				goto retry;
-			}
-
 			/*
 			 * The queue will fill after this allocation, so set
 			 * it as full, and mark this process as "batching".
@@ -955,12 +937,8 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	/* init elvpriv */
 	if (rw_flags & REQ_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
-			create_io_context(gfp_mask, q->node);
-			ioc = rq_ioc(bio);
-			if (!ioc)
-				goto fail_elvpriv;
-
-			icq = ioc_create_icq(ioc, q, gfp_mask);
+			if (ioc)
+				icq = ioc_create_icq(ioc, q, gfp_mask);
 			if (!icq)
 				goto fail_elvpriv;
 		}
@@ -1071,7 +1049,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * to allocate at least one request, and up to a big batch of them
 	 * for a small period time. See ioc_batching, ioc_set_batching
 	 */
-	create_io_context(GFP_NOIO, q->node);
 	ioc_set_batching(q, current->io_context);
 
 	spin_lock_irq(q->queue_lock);
@@ -1086,6 +1063,9 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 
 	BUG_ON(rw != READ && rw != WRITE);
 
+	/* create ioc upfront */
+	create_io_context(gfp_mask, q->node);
+
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, rw, NULL, gfp_mask);
 	if (!rq)
@@ -1698,6 +1678,14 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	/*
+	 * Various block parts want %current->io_context and lazy ioc
+	 * allocation ends up trading a lot of pain for a small amount of
+	 * memory. Just allocate it upfront. This may fail and block
+	 * layer knows how to live with it.
+	 */
+	create_io_context(GFP_ATOMIC, q->node);
+
 	if (blk_throtl_bio(q, bio))
 		return false;	/* throttled, will be resubmitted later */
 
diff --git a/trunk/block/blk-throttle.c b/trunk/block/blk-throttle.c
index 5b0659512047..e287c19908c8 100644
--- a/trunk/block/blk-throttle.c
+++ b/trunk/block/blk-throttle.c
@@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		goto out;
 	}
 
-	/* bio_associate_current() needs ioc, try creating */
-	create_io_context(GFP_ATOMIC, q->node);
-
 	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
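
A note on the pattern (commentary, not part of the commit): the change
collapses the old "unlock queue_lock, create_io_context(), relock,
retest" dance in __get_request() into a single best-effort allocation at
the entry points (blk_get_request() and generic_make_request_checks()).
Below is a minimal userspace sketch of that locking pattern, under the
assumption that a plain malloc() can stand in for create_io_context();
the names ensure_ctx() and submit_request() are hypothetical, not kernel
API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread void *ctx;	/* models current->io_context */

/* Best-effort: callers must tolerate ctx staying NULL, just as the
 * block layer tolerates a missing ioc after this patch. */
static void ensure_ctx(void)
{
	if (!ctx)
		ctx = malloc(64);
}

static void submit_request(void)
{
	/* Allocate before taking the lock, instead of unlocking,
	 * allocating, relocking and retesting queue state whenever
	 * ctx turns out to be missing under the lock. */
	ensure_ctx();

	pthread_mutex_lock(&queue_lock);
	if (ctx)
		printf("request allocated with ioc state\n");
	else
		printf("request allocated without ioc\n");
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	submit_request();
	free(ctx);
	return 0;
}

Allocating up front is what lets the retry loop and the conditional
create_io_context() calls in __get_request(), get_request() and
blk_throtl_bio() go away: per the new comment in
generic_make_request_checks(), the GFP_ATOMIC allocation may fail, and
the remaining users already know how to live with a NULL ioc.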