block: change the tag sync vs async restriction logic
Make sync and async requests fully share the tag space, but disallow
async requests from using the last two slots.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Jens Axboe committed May 20, 2009
1 parent ac36552 commit 0a7ae2f
Showing 5 changed files with 21 additions and 13 deletions.
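
Editor's note: before this commit, sync and async requests split the tag
space by offset. Sync allocations scanned the tag map from bit 0, while
async allocations started at max_depth >> 2, reserving the first quarter
of the tags for sync I/O. Afterwards both scan from bit 0 over the full
map, but async requests see a depth clamped two below the real one, so
the last two tags always remain available to sync I/O. A minimal sketch
of the new clamp (the helper name is illustrative, not from the commit):

    /* Illustrative only: tag depth visible to an async request. */
    static unsigned effective_async_depth(unsigned max_depth)
    {
            if (max_depth <= 1)
                    return max_depth;  /* too shallow to reserve anything */
            max_depth -= 2;            /* hold back the last two tags */
            if (!max_depth)
                    max_depth = 1;     /* but never starve async entirely */
            return max_depth;
    }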
2 changes: 1 addition & 1 deletion block/blk-barrier.c
@@ -218,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
         } else
                 skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-        if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+        if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
                 rq = NULL;
         else
                 skip |= QUEUE_ORDSEQ_DRAIN;
2 changes: 1 addition & 1 deletion block/blk-core.c
@@ -1815,7 +1815,7 @@ void blk_dequeue_request(struct request *rq)
          * the driver side.
          */
         if (blk_account_rq(rq))
-                q->in_flight++;
+                q->in_flight[rq_is_sync(rq)]++;
 }
 
 /**
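
Editor's note: rq_is_sync() evaluates to 0 or 1, so it doubles as the
array index here. Assuming the usual block-layer convention (consistent
with the in_flight[0] check in blk-tag.c below, though not spelled out
in this hunk):

    /* q->in_flight[0]: async requests issued to the driver
     * q->in_flight[1]: sync requests issued to the driver */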
15 changes: 9 additions & 6 deletions block/blk-tag.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
         struct blk_queue_tag *bqt = q->queue_tags;
-        unsigned max_depth, offset;
+        unsigned max_depth;
         int tag;
 
         if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
          * to starve sync IO on behalf of flooding async IO.
          */
         max_depth = bqt->max_depth;
-        if (rq_is_sync(rq))
-                offset = 0;
-        else
-                offset = max_depth >> 2;
+        if (!rq_is_sync(rq) && max_depth > 1) {
+                max_depth -= 2;
+                if (!max_depth)
+                        max_depth = 1;
+                if (q->in_flight[0] > max_depth)
+                        return 1;
+        }
 
         do {
-                tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+                tag = find_first_zero_bit(bqt->tag_map, max_depth);
                 if (tag >= max_depth)
                         return 1;
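
Editor's note: worked through with concrete numbers (an illustration,
not part of the commit):

    /* Async scan depth after the clamp, per bqt->max_depth:
     *
     *   max_depth   async depth   sync depth
     *       1            1            1
     *       2            1            2
     *       4            2            4
     *      31           29           31
     *
     * The added in_flight[0] check refuses an async tag outright once
     * the async in-flight count already exceeds the clamped depth.
     */

The switch from find_next_zero_bit() with an offset to
find_first_zero_bit() falls out of this: both request types now start
scanning at bit 0 and differ only in how deep they may scan.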
8 changes: 4 additions & 4 deletions block/elevator.c
@@ -546,7 +546,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
          * in_flight count again
          */
         if (blk_account_rq(rq)) {
-                q->in_flight--;
+                q->in_flight[rq_is_sync(rq)]--;
                 if (blk_sorted_rq(rq))
                         elv_deactivate_rq(q, rq);
         }
@@ -685,7 +685,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
         if (unplug_it && blk_queue_plugged(q)) {
                 int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-                                - q->in_flight;
+                                - queue_in_flight(q);
 
                 if (nrq >= q->unplug_thresh)
                         __generic_unplug_device(q);
@@ -823,7 +823,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
          * request is released from the driver, io must be done
          */
         if (blk_account_rq(rq)) {
-                q->in_flight--;
+                q->in_flight[rq_is_sync(rq)]--;
                 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                         e->ops->elevator_completed_req_fn(q, rq);
         }
@@ -838,7 +838,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
         if (!list_empty(&q->queue_head))
                 next = list_entry_rq(q->queue_head.next);
 
-        if (!q->in_flight &&
+        if (!queue_in_flight(q) &&
             blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
             (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
                 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
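
Editor's note: the elevator changes are mechanical, but they rest on an
accounting invariant worth stating (my reading, not text from the
commit): whichever counter blk_dequeue_request() incremented for a
request, the requeue and completion paths must decrement that same
counter, hence the shared rq_is_sync(rq) index at all three sites. The
unplug arithmetic in elv_insert() and the drain test in
elv_completed_request() only care about the total, so they switch to
queue_in_flight().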
7 changes: 6 additions & 1 deletion include/linux/blkdev.h
@@ -404,7 +404,7 @@ struct request_queue
         struct list_head        tag_busy_list;
 
         unsigned int            nr_sorted;
-        unsigned int            in_flight;
+        unsigned int            in_flight[2];
 
         unsigned int            rq_timeout;
         struct timer_list       timeout;
@@ -511,6 +511,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
         __clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+        return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
         WARN_ON_ONCE(!queue_is_locked(q));
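
Editor's note: the new helper gives callers that only care about the
total a single name for the sum, instead of open-coding it at each
site. A hypothetical use, for illustration only:

    /* Hypothetical example, not from the commit: has the queue fully
     * drained, counting both sync and async requests? */
    static inline int queue_is_drained(struct request_queue *q)
    {
            return queue_in_flight(q) == 0;
    }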
