Commit e022116

---
yaml
---
r: 345669
b: refs/heads/master
c: 7046057
h: refs/heads/master
i:
  345667: ed17e20
v: v3
Bart Van Assche authored and Jens Axboe committed Dec 6, 2012
1 parent 62bb852 commit e022116
Showing 2 changed files with 10 additions and 25 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c246e80d86736312933646896c4157daf511dadc
+refs/heads/master: 704605711ef048a7c6ad2ec599f15d2e0baf86b2
33 changes: 9 additions & 24 deletions trunk/block/blk-core.c
@@ -219,12 +219,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time.
+ *   restarted around the specified time. Queue lock must be held.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	queue_delayed_work(kblockd_workqueue, &q->delay_work,
-				msecs_to_jiffies(msecs));
+	if (likely(!blk_queue_dead(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
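
The kernel-doc change above makes the locking requirement explicit: blk_delay_queue() is to be called with the queue lock held. As a purely hypothetical illustration (my_request_fn() and my_resources_available() are invented placeholders, not part of this patch), a request_fn-style caller could use it like this:

    /* Sketch only: a request_fn is invoked with q->queue_lock held, which
     * satisfies the documented requirement for blk_delay_queue(). */
    #include <linux/blkdev.h>

    static bool my_resources_available(void);  /* driver-specific check (placeholder) */

    static void my_request_fn(struct request_queue *q)
    {
            if (!my_resources_available()) {
                    /* Out of resources: ask kblockd to restart queue
                     * processing in ~3 ms. With this patch the call is a
                     * no-op once the queue has been marked dead. */
                    blk_delay_queue(q, 3);
                    return;
            }
            /* ... fetch and dispatch requests ... */
    }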

@@ -334,11 +335,11 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us.
+ *    of us. The caller must hold the queue lock.
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q)))
+	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
 		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
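
The blk_queue_dead() test added here means a driver can kick the queue under the queue lock without worrying that queue teardown has already begun. A hypothetical sketch (my_complete_rq() is an invented example, not part of this patch):

    /* Sketch only: a completion path that restarts the queue while holding
     * the queue lock; blk_run_queue_async() now does nothing once the
     * queue has been marked dead. */
    static void my_complete_rq(struct request_queue *q, struct request *rq, int error)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            __blk_end_request_all(rq, error);       /* requires queue lock */
            blk_run_queue_async(q);                 /* kick kblockd */
            spin_unlock_irqrestore(q->queue_lock, flags);
    }
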
@@ -2913,27 +2914,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
 	trace_block_unplug(q, depth, !from_schedule);
 
-	/*
-	 * Don't mess with a dying queue.
-	 */
-	if (unlikely(blk_queue_dying(q))) {
-		spin_unlock(q->queue_lock);
-		return;
-	}
-
-	/*
-	 * If we are punting this to kblockd, then we can safely drop
-	 * the queue_lock before waking kblockd (which needs to take
-	 * this lock).
-	 */
-	if (from_schedule) {
-		spin_unlock(q->queue_lock);
+	if (from_schedule)
 		blk_run_queue_async(q);
-	} else {
+	else
 		__blk_run_queue(q);
-		spin_unlock(q->queue_lock);
-	}
+
+	spin_unlock(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
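
After this hunk, queue_unplugged() no longer checks for a dying queue and no longer drops the lock early before punting to kblockd; the queue lock is released once, at the end. Reconstructed from the diff above (declarations outside the shown lines are omitted), the resulting function looks roughly like:

    static void queue_unplugged(struct request_queue *q, unsigned int depth,
                                bool from_schedule)
    {
            trace_block_unplug(q, depth, !from_schedule);

            if (from_schedule)
                    blk_run_queue_async(q);
            else
                    __blk_run_queue(q);

            spin_unlock(q->queue_lock);
    }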
