Merge branch 'for-3.4/core' of git://git.kernel.dk/linux-block
Pull block core bits from Jens Axboe:
 "It's a nice and quiet round this time, since most of the tricky stuff
  has been pushed to 3.5 to give it more time to mature.  After a few
  hectic block IO core changes for 3.3 and 3.2, I'm quite happy with a
  slow round.

  Really minor stuff in here, the only real functional change is making
  the auto-unplug threshold a per-queue entity.  The threshold is set so
  that it's low enough that we don't hold off IO for too long, but still
  big enough to get a nice benefit from the batched insert (and hence
  queue lock cost reduction).  For raid configurations, this currently
  breaks down."

* 'for-3.4/core' of git://git.kernel.dk/linux-block:
  block: make auto block plug flush threshold per-disk based
  Documentation: Add sysfs ABI change for cfq's target latency.
  block: Make cfq_target_latency tunable through sysfs.
  block: use lockdep_assert_held for queue locking
  block: blk_alloc_queue_node(): use caller's GFP flags instead of GFP_KERNEL
Linus Torvalds committed Apr 14, 2012
2 parents 2d59dcf + 1b2e19f commit d8dd0b6
Showing 5 changed files with 27 additions and 16 deletions.
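The one functional change in the pull, the per-disk plug flush threshold, shows up in the attempt_plug_merge() hunk below: requests sitting in the plug list for other queues no longer inflate request_count, so in a raid configuration one disk's traffic no longer triggers premature unplugs for its siblings. A minimal sketch of the flush decision that count feeds, assuming the 3.4-era threshold of 16 (the helper below is illustrative, not kernel source):

/* Sketch only: request_count now counts requests plugged for this
 * disk's queue alone; the submit path flushes the plug once that
 * per-disk count crosses the threshold, keeping the batch large
 * enough to amortize the queue lock cost of the batched insert. */
#define BLK_MAX_REQUEST_COUNT 16	/* assumed 3.4 value */

static inline int plug_should_flush(unsigned int request_count)
{
	return request_count >= BLK_MAX_REQUEST_COUNT;
}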
8 changes: 8 additions & 0 deletions Documentation/ABI/testing/sysfs-cfq-target-latency
@@ -0,0 +1,8 @@
+What:		/sys/block/<device>/iosched/target_latency
+Date:		March 2012
+contact:	Tao Ma <boyu.mt@taobao.com>
+Description:
+		The /sys/block/<device>/iosched/target_latency only exists
+		when the user sets cfq to /sys/block/<device>/scheduler.
+		It contains an estimated latency time for the cfq. cfq will
+		use it to calculate the time slice used for every task.
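A usage sketch (the device name and the 500 ms value are illustrative assumptions; the tunable reads and writes in milliseconds, and the file exists only while cfq is the active scheduler for that disk):

#include <stdio.h>

int main(void)
{
	/* /sys/block/sda is an assumed example device. */
	FILE *f = fopen("/sys/block/sda/iosched/target_latency", "w");

	if (!f) {
		perror("fopen");	/* cfq not selected, or no such disk */
		return 1;
	}
	fprintf(f, "500\n");		/* relax target latency to 500 ms */
	return fclose(f) ? 1 : 0;
}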
5 changes: 3 additions & 2 deletions block/blk-core.c
@@ -483,7 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;

-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;

@@ -1277,7 +1277,8 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
list_for_each_entry_reverse(rq, &plug->list, queuelist) {
int el_ret;

-		(*request_count)++;
+		if (rq->q == q)
+			(*request_count)++;

if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
2 changes: 1 addition & 1 deletion block/blk-throttle.c
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
struct bio_list bl;
struct bio *bio;

-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);

bio_list_init(&bl);

10 changes: 8 additions & 2 deletions block/cfq-iosched.c
@@ -295,6 +295,7 @@ struct cfq_data {
unsigned int cfq_slice_idle;
unsigned int cfq_group_idle;
unsigned int cfq_latency;
+	unsigned int cfq_target_latency;

/*
* Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;

-	return cfq_target_latency * cfqg->weight / st->total_weight;
+	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
}
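A worked example with illustrative numbers makes the hunk above concrete: with the default 300 ms target latency, a group holding weight 500 out of a service tree total of 1500 is allotted 300 * 500 / 1500 = 100 ms of disk time. The same substitution recurs in choose_service_tree() below, where the target latency also scales the async workload's slice.

/* Illustrative arithmetic only, mirroring cfq_group_slice() above: */
static unsigned int example_group_slice(void)
{
	unsigned int target_latency = 300;	/* ms, cfq's default */
	unsigned int weight = 500, total_weight = 1500;

	return target_latency * weight / total_weight;	/* = 100 ms */
}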

static inline unsigned
@@ -2271,7 +2272,8 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
* to have higher weight. A more accurate thing would be to
* calculate system wide asnc/sync ratio.
*/
-		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+		tmp = cfqd->cfq_target_latency *
+			cfqg_busy_async_queues(cfqd, cfqg);
tmp = tmp/cfqd->busy_queues;
slice = min_t(unsigned, slice, tmp);

@@ -3737,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_back_penalty = cfq_back_penalty;
cfqd->cfq_slice[0] = cfq_slice_async;
cfqd->cfq_slice[1] = cfq_slice_sync;
+	cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_group_idle = cfq_group_idle;
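The cfq_target_latency global copied into the new per-queue field here is the pre-existing compile-time default; in the 3.4 tree it is defined near the top of cfq-iosched.c roughly as follows (recalled from the source, worth verifying against the file):

static const int cfq_target_latency = HZ * 3/10; /* 300 ms */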
@@ -3788,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -3821,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(slice_idle),
CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
+	CFQ_ATTR(target_latency),
__ATTR_NULL
};
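The CFQ_ATTR() body is truncated in the view above; in this file it token-pastes the attribute name into a show/store pair, so the single CFQ_ATTR(target_latency) entry is what publishes the two new functions as a sysfs file. Its shape is roughly this (recalled from the 3.4 source, treat as a sketch):

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

With that, a write to /sys/block/<device>/iosched/target_latency lands in cfq_target_latency_store(), which the STORE_FUNCTION above clamps to [1, UINT_MAX] and converts from milliseconds to jiffies.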

18 changes: 7 additions & 11 deletions include/linux/blkdev.h
@@ -426,14 +426,10 @@ struct request_queue {
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))

-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
{
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
static inline int queue_flag_test_and_clear(unsigned int flag,
struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);

if (test_bit(flag, &q->queue_flags)) {
__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
static inline int queue_flag_test_and_set(unsigned int flag,
struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);

if (!test_bit(flag, &q->queue_flags)) {
__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
__set_bit(flag, &q->queue_flags);
}

@@ -487,7 +483,7 @@ static inline int queue_in_flight(struct request_queue *q)

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
__clear_bit(flag, &q->queue_flags);
}
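The swap from queue_is_locked() to queue_lockdep_assert_held() throughout is more than a rename: spin_is_locked() only said that somebody held the lock (and the old helper returned 1 unconditionally on !SMP), while lockdep_assert_held() warns precisely when the current context does not hold it, and compiles away entirely when lockdep is disabled. A hypothetical caller, just to show the intended pattern:

/* Hypothetical example, not from this patch: any helper that relies
 * on q->queue_lock being held can now document and enforce it. */
static void frob_queue_state(struct request_queue *q)
{
	queue_lockdep_assert_held(q);	/* no-op unless lockdep is on */
	/* ... manipulate state protected by q->queue_lock ... */
}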

