Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 251139
b: refs/heads/master
c: 3ac0cc4
h: refs/heads/master
i:
  251137: c8208b2
  251135: c235490
v: v3
  • Loading branch information
shaohua.li@intel.com authored and Jens Axboe committed May 6, 2011
1 parent 878cf09 commit ae40f0c
Show file tree
Hide file tree
Showing 4 changed files with 33 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: f3876930952390a31c3a7fd68dd621464a36eb80
refs/heads/master: 3ac0cc4508709d42ec9aa351086c7d38bfc0660c
16 changes: 11 additions & 5 deletions trunk/block/blk-flush.c
Original file line number Diff line number Diff line change
Expand Up @@ -212,13 +212,19 @@ static void flush_end_io(struct request *flush_rq, int error)
}

/*
* Moving a request silently to empty queue_head may stall the
* queue. Kick the queue in those cases. This function is called
* from request completion path and calling directly into
* request_fn may confuse the driver. Always use kblockd.
* Kick the queue to avoid a stall in two cases:
* 1. Moving a request silently to an empty queue_head may stall
* the queue.
* 2. When a flush request is running in a non-queueable queue, the
* queue is held. Restart the queue after the flush request is
* finished to avoid a stall.
* This function is called from request completion path and calling
* directly into request_fn may confuse the driver. Always use
* kblockd.
*/
if (queued)
if (queued || q->flush_queue_delayed)
blk_run_queue_async(q);
q->flush_queue_delayed = 0;
}

/**
Expand Down
21 changes: 20 additions & 1 deletion trunk/block/blk.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,26 @@ static inline struct request *__elv_next_request(struct request_queue *q)
rq = list_entry_rq(q->queue_head.next);
return rq;
}

/*
* If a flush request is running and the flush request isn't
* queueable in the drive, we can hold the queue till the flush
* request is finished. Even if we don't do this, the driver can't
* dispatch the next requests and will requeue them — and holding
* the queue can improve throughput too. For example, suppose we
* have requests flush1, write1, flush2. flush1 is dispatched, then
* the queue is held and write1 isn't inserted into the queue. After
* flush1 is finished, flush2 will be dispatched. Since the disk
* cache is already clean, flush2 will finish very soon, so it looks
* as if flush2 were folded into flush1.
* Since the queue is held, a flag is set to indicate that the queue
* should be restarted later. Please see flush_end_io() for
* details.
*/
if (q->flush_pending_idx != q->flush_running_idx &&
!queue_flush_queueable(q)) {
q->flush_queue_delayed = 1;
return NULL;
}
if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
return NULL;
}
Expand Down
1 change: 1 addition & 0 deletions trunk/include/linux/blkdev.h
Original file line number Diff line number Diff line change
Expand Up @@ -365,6 +365,7 @@ struct request_queue
*/
unsigned int flush_flags;
unsigned int flush_not_queueable:1;
unsigned int flush_queue_delayed:1;
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;
unsigned long flush_pending_since;
Expand Down

0 comments on commit ae40f0c

Please sign in to comment.