Skip to content

Commit

Permalink
---
Browse files (browse the repository at this point in the history)
yaml
---
r: 309196
b: refs/heads/master
c: 6ecf23a
h: refs/heads/master
v: v3
  • Loading branch information
Tejun Heo authored and Jens Axboe committed Mar 6, 2012
1 parent 64f0a47 commit 3e67004
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 7 deletions.
2 changes: 1 addition & 1 deletion in [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: d732580b4eb31553c63744a47d590f770cafb8f0
refs/heads/master: 6ecf23afab13c39d3bb0e2d826d0984b0dd53733
12 changes: 8 additions & 4 deletions trunk/block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -372,8 +372,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (q->elevator)
elv_drain_elevator(q);

if (drain_all)
blk_throtl_drain(q);
blk_throtl_drain(q);

/*
* This function might be called on a queue which failed
Expand Down Expand Up @@ -415,8 +414,8 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
*
* In bypass mode, only the dispatch FIFO queue of @q is used. This
* function makes @q enter bypass mode and drains all requests which were
* issued before. On return, it's guaranteed that no request has ELVPRIV
* set.
* throttled or issued before. On return, it's guaranteed that no request
* is being throttled or has ELVPRIV set.
*/
void blk_queue_bypass_start(struct request_queue *q)
{
Expand Down Expand Up @@ -461,6 +460,11 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

spin_lock_irq(lock);

/* dead queue is permanently in bypass mode till released */
q->bypass_depth++;
queue_flag_set(QUEUE_FLAG_BYPASS, q);

queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);
Expand Down
4 changes: 2 additions & 2 deletions trunk/block/blk-throttle.c
Original file line number Diff line number Diff line change
Expand Up @@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
struct request_queue *q = td->queue;

/* no throttling for dead queue */
if (unlikely(blk_queue_dead(q)))
if (unlikely(blk_queue_bypass(q)))
return NULL;

rcu_read_lock();
Expand All @@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
spin_lock_irq(q->queue_lock);

/* Make sure @q is still alive */
if (unlikely(blk_queue_dead(q))) {
if (unlikely(blk_queue_bypass(q))) {
kfree(tg);
return NULL;
}
Expand Down

0 comments on commit 3e67004

Please sign in to comment.