Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 3898
b: refs/heads/master
c: d634453
h: refs/heads/master
v: v3
  • Loading branch information
Nick Piggin authored and Linus Torvalds committed Jun 29, 2005
1 parent 2b17692 commit d9f1525
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 11 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 450991bc1026135ee30482a4a806d069915ab2f6
refs/heads/master: d6344532a26a318c128102507f6328aaafe02d4d
29 changes: 19 additions & 10 deletions trunk/drivers/block/ll_rw_blk.c
Original file line number Diff line number Diff line change
Expand Up @@ -1867,19 +1867,20 @@ static void freed_request(request_queue_t *q, int rw)

#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
* Get a free request, queue_lock must not be held
* Get a free request, queue_lock must be held.
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
*/
static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
int gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = get_io_context(gfp_mask);
struct io_context *ioc = get_io_context(GFP_ATOMIC);

if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
goto out;

spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) {
/*
* The queue will fill after this allocation, so set it as
Expand Down Expand Up @@ -1907,7 +1908,6 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
* The queue is full and the allocating process is not a
* "batcher", and not exempted by the IO scheduler
*/
spin_unlock_irq(q->queue_lock);
goto out;
}

Expand Down Expand Up @@ -1950,7 +1950,6 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
if (unlikely(rl->count[rw] == 0))
rl->starved[rw] = 1;

spin_unlock_irq(q->queue_lock);
goto out;
}

Expand All @@ -1967,6 +1966,8 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
/*
* No available requests for this queue, unplug the device and wait for some
* requests to become available.
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
static struct request *get_request_wait(request_queue_t *q, int rw,
struct bio *bio)
Expand All @@ -1986,7 +1987,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
if (!rq) {
struct io_context *ioc;

generic_unplug_device(q);
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
io_schedule();

/*
Expand All @@ -1998,6 +2000,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
ioc = get_io_context(GFP_NOIO);
ioc_set_batching(q, ioc);
put_io_context(ioc);

spin_lock_irq(q->queue_lock);
}
finish_wait(&rl->wait[rw], &wait);
}
Expand All @@ -2011,14 +2015,18 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)

BUG_ON(rw != READ && rw != WRITE);

if (gfp_mask & __GFP_WAIT)
spin_lock_irq(q->queue_lock);
if (gfp_mask & __GFP_WAIT) {
rq = get_request_wait(q, rw, NULL);
else
} else {
rq = get_request(q, rw, NULL, gfp_mask);
if (!rq)
spin_unlock_irq(q->queue_lock);
}
/* q->queue_lock is unlocked at this point */

return rq;
}

EXPORT_SYMBOL(blk_get_request);

/**
Expand Down Expand Up @@ -2605,9 +2613,10 @@ static int __make_request(request_queue_t *q, struct bio *bio)
get_rq:
/*
	 * Grab a free request. This may sleep but cannot fail.
* Returns with the queue unlocked.
*/
spin_unlock_irq(q->queue_lock);
req = get_request_wait(q, rw, bio);

/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
Expand Down

0 comments on commit d9f1525

Please sign in to comment.