
Commit

---
yaml
---
r: 142051
b: refs/heads/master
c: 3cd6927
h: refs/heads/master
i:
  142049: 8af4f70
  142047: 5f727cf
v: v3
Linus Torvalds committed Apr 6, 2009
1 parent cf8e1fe commit 61db920
Showing 14 changed files with 172 additions and 109 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 67a32ec750109fdfc7cba311145a18d543521822
+refs/heads/master: 3cd69271f86770499425c7cea2902512ba936a75
85 changes: 48 additions & 37 deletions trunk/block/blk-core.c
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
{
struct request_list *rl = &q->rq;

-rl->count[READ] = rl->count[WRITE] = 0;
-rl->starved[READ] = rl->starved[WRITE] = 0;
+rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
rl->elvpriv = 0;
-init_waitqueue_head(&rl->wait[READ]);
-init_waitqueue_head(&rl->wait[WRITE]);
+init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
mempool_free_slab, request_cachep, q->node);
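
The hunk above is the core of the change: the two request_list slots are now indexed by whether a request is synchronous, not by its data direction. A minimal sketch of that classification, with the enum values and helper name invented here purely for illustration (they are not taken from the kernel headers):

/* Illustrative only: reads and writes explicitly marked synchronous share
 * one list; plain buffered writes land on the other. */
enum rl_list { RL_ASYNC = 0, RL_SYNC = 1 };

static int rl_index(int is_write, int marked_sync)
{
	return (!is_write || marked_sync) ? RL_SYNC : RL_ASYNC;
}
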
@@ -699,37 +699,37 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
ioc->last_waited = jiffies;
}

-static void __freed_request(struct request_queue *q, int rw)
+static void __freed_request(struct request_queue *q, int sync)
{
struct request_list *rl = &q->rq;

-if (rl->count[rw] < queue_congestion_off_threshold(q))
-blk_clear_queue_congested(q, rw);
+if (rl->count[sync] < queue_congestion_off_threshold(q))
+blk_clear_queue_congested(q, sync);

-if (rl->count[rw] + 1 <= q->nr_requests) {
-if (waitqueue_active(&rl->wait[rw]))
-wake_up(&rl->wait[rw]);
+if (rl->count[sync] + 1 <= q->nr_requests) {
+if (waitqueue_active(&rl->wait[sync]))
+wake_up(&rl->wait[sync]);

-blk_clear_queue_full(q, rw);
+blk_clear_queue_full(q, sync);
}
}

/*
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(struct request_queue *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int sync, int priv)
{
struct request_list *rl = &q->rq;

-rl->count[rw]--;
+rl->count[sync]--;
if (priv)
rl->elvpriv--;

-__freed_request(q, rw);
+__freed_request(q, sync);

-if (unlikely(rl->starved[rw ^ 1]))
-__freed_request(q, rw ^ 1);
+if (unlikely(rl->starved[sync ^ 1]))
+__freed_request(q, sync ^ 1);
}

/*
@@ -743,25 +743,25 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = NULL;
-const int rw = rw_flags & 0x01;
+const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue, priv;

may_queue = elv_may_queue(q, rw_flags);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;

-if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
-if (rl->count[rw]+1 >= q->nr_requests) {
+if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+if (rl->count[is_sync]+1 >= q->nr_requests) {
ioc = current_io_context(GFP_ATOMIC, q->node);
/*
* The queue will fill after this allocation, so set
* it as full, and mark this process as "batching".
* This process will be allowed to complete a batch of
* requests, others will be blocked.
*/
-if (!blk_queue_full(q, rw)) {
+if (!blk_queue_full(q, is_sync)) {
ioc_set_batching(q, ioc);
-blk_set_queue_full(q, rw);
+blk_set_queue_full(q, is_sync);
} else {
if (may_queue != ELV_MQUEUE_MUST
&& !ioc_batching(q, ioc)) {
@@ -774,19 +774,19 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
}
}
}
-blk_set_queue_congested(q, rw);
+blk_set_queue_congested(q, is_sync);
}

/*
* Only allow batching queuers to allocate up to 50% over the defined
* limit of requests, otherwise we could have thousands of requests
* allocated with any setting of ->nr_requests
*/
-if (rl->count[rw] >= (3 * q->nr_requests / 2))
+if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
goto out;

-rl->count[rw]++;
-rl->starved[rw] = 0;
+rl->count[is_sync]++;
+rl->starved[is_sync] = 0;

priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
if (priv)
@@ -804,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
* wait queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
-freed_request(q, rw, priv);
+freed_request(q, is_sync, priv);

/*
* in the very unlikely event that allocation failed and no
@@ -814,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
* rq mempool into READ and WRITE
*/
rq_starved:
-if (unlikely(rl->count[rw] == 0))
-rl->starved[rw] = 1;
+if (unlikely(rl->count[is_sync] == 0))
+rl->starved[is_sync] = 1;

goto out;
}
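
Together with the freed_request() hunk earlier in this file, starvation handling is now paired by synchronicity: the allocation side records that a list ran dry, and freeing a request on the opposite list wakes it back up through the sync ^ 1 index. Condensed from the code above (a paraphrase, not a drop-in replacement):

/* allocation path (the rq_starved label above) */
if (unlikely(rl->count[is_sync] == 0))
	rl->starved[is_sync] = 1;

/* free path (freed_request() above): after returning a request to list
 * 'sync', also poke the other list if it starved in the meantime. */
if (unlikely(rl->starved[sync ^ 1]))
	__freed_request(q, sync ^ 1);
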
@@ -829,7 +829,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;

-trace_block_getrq(q, bio, rw);
+trace_block_getrq(q, bio, rw_flags & 1);
out:
return rq;
}
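
For a sense of scale of the limits checked in get_request(), assuming the usual default of 128 requests per list: congestion and the queue-full flag trip as a list approaches nr_requests, and only a task marked as batching may overrun the list by 50%.

/* Worked example of the hard cap, default value assumed:
 * nr_requests = 128  ->  a batching task is refused at 3 * 128 / 2 = 192. */
static unsigned int batch_cap(unsigned int nr_requests)
{
	return 3 * nr_requests / 2;
}
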
@@ -843,7 +843,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
struct bio *bio)
{
-const int rw = rw_flags & 0x01;
+const bool is_sync = rw_is_sync(rw_flags) != 0;
struct request *rq;

rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -852,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
struct io_context *ioc;
struct request_list *rl = &q->rq;

-prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
TASK_UNINTERRUPTIBLE);

-trace_block_sleeprq(q, bio, rw);
+trace_block_sleeprq(q, bio, rw_flags & 1);

__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
@@ -871,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
ioc_set_batching(q, ioc);

spin_lock_irq(q->queue_lock);
-finish_wait(&rl->wait[rw], &wait);
+finish_wait(&rl->wait[is_sync], &wait);

rq = get_request(q, rw_flags, bio, GFP_NOIO);
};
@@ -1070,14 +1070,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
* it didn't come out of our reserved rq pools
*/
if (req->cmd_flags & REQ_ALLOCED) {
-int rw = rq_data_dir(req);
+int is_sync = rq_is_sync(req) != 0;
int priv = req->cmd_flags & REQ_ELVPRIV;

BUG_ON(!list_empty(&req->queuelist));
BUG_ON(!hlist_unhashed(&req->hash));

blk_free_request(q, req);
-freed_request(q, rw, priv);
+freed_request(q, is_sync, priv);
}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
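
One invariant worth spelling out: the allocation and free paths must classify a request the same way, or the per-list counters drift. Condensed from the hunks above:

/* get_request():        is_sync = rw_is_sync(rw_flags) != 0;  rl->count[is_sync]++;
 * __blk_put_request():  is_sync = rq_is_sync(req) != 0;       freed_request(q, is_sync, priv);
 */
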
@@ -1128,6 +1128,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_UNPLUG;
if (bio_rw_meta(bio))
req->cmd_flags |= REQ_RW_META;
+if (bio_noidle(bio))
+req->cmd_flags |= REQ_NOIDLE;

req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
@@ -1136,6 +1138,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
blk_rq_bio_prep(req->q, req, bio);
}

+/*
+* Only disabling plugging for non-rotational devices if it does tagging
+* as well, otherwise we do need the proper merging
+*/
+static inline bool queue_should_plug(struct request_queue *q)
+{
+return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+}
+
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
@@ -1242,11 +1253,11 @@ static int __make_request(struct request_queue *q, struct bio *bio)
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
bio_flagged(bio, BIO_CPU_AFFINE))
req->cpu = blk_cpu_to_group(smp_processor_id());
-if (!blk_queue_nonrot(q) && elv_queue_empty(q))
+if (queue_should_plug(q) && elv_queue_empty(q))
blk_plug_device(q);
add_request(q, req);
out:
-if (unplug || blk_queue_nonrot(q))
+if (unplug || !queue_should_plug(q))
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
return 0;
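
Reading queue_should_plug() together with its two call sites above: plugging behaviour is unchanged for everything except non-rotational devices that also do their own command queueing, which now skip the plug entirely. In outline:

/* rotational disk               -> plug, merge, unplug on demand (as before)
 * non-rotational, no tagging    -> still plugged, so requests can merge
 * non-rotational with tagging   -> never plugged, dispatched immediately */
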
40 changes: 20 additions & 20 deletions trunk/block/blk-sysfs.c
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
q->nr_requests = nr;
blk_queue_congestion_threshold(q);

-if (rl->count[READ] >= queue_congestion_on_threshold(q))
-blk_set_queue_congested(q, READ);
-else if (rl->count[READ] < queue_congestion_off_threshold(q))
-blk_clear_queue_congested(q, READ);
-
-if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-blk_set_queue_congested(q, WRITE);
-else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-blk_clear_queue_congested(q, WRITE);
-
-if (rl->count[READ] >= q->nr_requests) {
-blk_set_queue_full(q, READ);
-} else if (rl->count[READ]+1 <= q->nr_requests) {
-blk_clear_queue_full(q, READ);
-wake_up(&rl->wait[READ]);
+if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+blk_set_queue_congested(q, BLK_RW_SYNC);
+else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+blk_set_queue_congested(q, BLK_RW_ASYNC);
+else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+blk_set_queue_full(q, BLK_RW_SYNC);
+} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+blk_clear_queue_full(q, BLK_RW_SYNC);
+wake_up(&rl->wait[BLK_RW_SYNC]);
}

-if (rl->count[WRITE] >= q->nr_requests) {
-blk_set_queue_full(q, WRITE);
-} else if (rl->count[WRITE]+1 <= q->nr_requests) {
-blk_clear_queue_full(q, WRITE);
-wake_up(&rl->wait[WRITE]);
+if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+blk_set_queue_full(q, BLK_RW_ASYNC);
+} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+blk_clear_queue_full(q, BLK_RW_ASYNC);
+wake_up(&rl->wait[BLK_RW_ASYNC]);
}
spin_unlock_irq(q->queue_lock);
return ret;
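
The sysfs path now repeats the same threshold checks once per list. An equivalent formulation, shown only to make the symmetry explicit (it assumes the two index constants cover the values 0 and 1, and is not what the patch itself does):

int i;

for (i = 0; i < 2; i++) {	/* BLK_RW_SYNC and BLK_RW_ASYNC */
	if (rl->count[i] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, i);
	else if (rl->count[i] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, i);

	if (rl->count[i] >= q->nr_requests) {
		blk_set_queue_full(q, i);
	} else if (rl->count[i] + 1 <= q->nr_requests) {
		blk_clear_queue_full(q, i);
		wake_up(&rl->wait[i]);
	}
}
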
4 changes: 3 additions & 1 deletion trunk/block/cfq-iosched.c
@@ -1992,8 +1992,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}
if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
cfq_slice_expired(cfqd, 1);
-else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+else if (sync && !rq_noidle(rq) &&
+RB_EMPTY_ROOT(&cfqq->sort_list)) {
cfq_arm_slice_timer(cfqd);
+}
}

if (!cfqd->rq_in_driver)
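
This is the consumer of the REQ_NOIDLE flag set in blk-core.c above via bio_noidle(): when a sync request marked noidle completes, CFQ no longer arms its idle timer, so the device is not held back waiting for more I/O from that queue. The completion decision now reads, in outline:

/* slice used up or idle class           -> expire the slice
 * sync, not REQ_NOIDLE, queue drained   -> arm the idle timer and wait
 * otherwise (async or noidle)           -> neither; move on immediately */
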
2 changes: 1 addition & 1 deletion trunk/block/elevator.c
@@ -677,7 +677,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
}

if (unplug_it && blk_queue_plugged(q)) {
-int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
- q->in_flight;

if (nrq >= q->unplug_thresh)
22 changes: 17 additions & 5 deletions trunk/fs/buffer.c
@@ -737,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
struct list_head tmp;
-struct address_space *mapping;
+struct address_space *mapping, *prev_mapping = NULL;
int err = 0, err2;

INIT_LIST_HEAD(&tmp);
@@ -762,7 +762,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* contents - it is a noop if I/O is still in
* flight on potentially older contents.
*/
-ll_rw_block(SWRITE_SYNC, 1, &bh);
+ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+/*
+* Kick off IO for the previous mapping. Note
+* that we will not run the very last mapping,
+* wait_on_buffer() will do that for us
+* through sync_buffer().
+*/
+if (prev_mapping && prev_mapping != mapping)
+blk_run_address_space(prev_mapping);
+prev_mapping = mapping;
+
brelse(bh);
spin_lock(lock);
}
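
The prev_mapping bookkeeping above is a small batching pattern: keep issuing plugged writes, and only kick the queue of a mapping once the loop has moved past it, leaving the very last mapping to the wait path. A self-contained sketch of the same pattern (the function name and array-based walk are made up for illustration; the real code iterates a buffer list):

static void write_in_mapping_batches(struct buffer_head **bhs, int nr)
{
	struct address_space *prev_mapping = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		ll_rw_block(SWRITE_SYNC_PLUG, 1, &bhs[i]);	/* plugged write */

		/* Kick the queue we have just finished filling. */
		if (prev_mapping && prev_mapping != bhs[i]->b_assoc_map)
			blk_run_address_space(prev_mapping);
		prev_mapping = bhs[i]->b_assoc_map;
	}
	/* The caller's wait_on_buffer() path unplugs the final mapping. */
}
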
@@ -2957,12 +2968,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];

-if (rw == SWRITE || rw == SWRITE_SYNC)
+if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
lock_buffer(bh);
else if (!trylock_buffer(bh))
continue;

-if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+rw == SWRITE_SYNC_PLUG) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
Expand Down Expand Up @@ -2998,7 +3010,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
-ret = submit_bh(WRITE, bh);
+ret = submit_bh(WRITE_SYNC, bh);
wait_on_buffer(bh);
if (buffer_eopnotsupp(bh)) {
clear_buffer_eopnotsupp(bh);
2 changes: 1 addition & 1 deletion trunk/fs/direct-io.c
@@ -1126,7 +1126,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
int acquire_i_mutex = 0;

if (rw & WRITE)
-rw = WRITE_SYNC;
+rw = WRITE_ODIRECT;

if (bdev)
bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
7 changes: 6 additions & 1 deletion trunk/fs/jbd/commit.c
@@ -351,8 +351,13 @@ void journal_commit_transaction(journal_t *journal)
spin_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;

+/*
+* Use plugged writes here, since we want to submit several before
+* we unplug the device. We don't do explicit unplugging in here,
+* instead we rely on sync_buffer() doing the unplug for us.
+*/
if (commit_transaction->t_synchronous_commit)
-write_op = WRITE_SYNC;
+write_op = WRITE_SYNC_PLUG;
spin_lock(&commit_transaction->t_handle_lock);
while (commit_transaction->t_updates) {
DEFINE_WAIT(wait);
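
Same idea as in fs/buffer.c: the commit path wants synchronous writes that do not force an unplug on every submission. The flag definitions themselves live in a header that is not expanded on this page, so the following is only the understood shape of the distinction, not the actual macros:

/* WRITE_SYNC_PLUG - synchronous write, queue left plugged by the submitter
 * WRITE_SYNC      - the same, plus an immediate unplug
 *
 * journal_commit_transaction() picks the plugged variant so many journal
 * buffers go out back to back and the later wait (sync_buffer) unplugs once. */
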