cfq-iosched: adapt slice to number of processes doing I/O
When the number of processes performing I/O concurrently increases,
a fixed time slice per process will cause large latencies.

With low_latency mode enabled, this patch scales the time slice
assigned to each process according to a 300ms target latency.

In order to keep fairness among processes:
* The number of active processes is computed using a special form of
running average that quickly follows sudden increases (to keep latency low)
and decreases slowly (to preserve fairness in spite of rapid drops in this
value); see the sketch below.
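
For illustration only (not part of the patch), one update step of this
running average in plain C; avg_step is a hypothetical helper name, and
the weights come from cfq_hist_divisor = 4 introduced below:

/*
 * One step of the busy-queue running average: the larger of the old
 * average and the current busy count gets weight cfq_hist_divisor - 1 = 3,
 * the smaller gets weight 1, and cfq_hist_divisor / 2 = 2 rounds the result.
 */
static unsigned avg_step(unsigned avg, unsigned busy)
{
	unsigned min_q = avg < busy ? avg : busy;
	unsigned max_q = avg > busy ? avg : busy;

	return (3 * max_q + min_q + 2) / 4;
}

Starting from avg = 1, a burst of 9 busy queues is tracked in two steps
(1 -> 7 -> 9), while the decay back down is gradual
(9 -> 7 -> 6 -> 5 -> 4 -> 3); the rounding term keeps the settled value
slightly above the instantaneous count, biasing toward fairness.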

To safeguard sequential bandwidth, we impose a minimum time slice
(computed using 2*cfq_slice_idle as the base, adjusted according to
priority and sync vs. async); the sketch below shows the resulting clamp.
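
Plugging in the defaults (assuming HZ = 1000, so cfq_slice_sync is 100ms
and cfq_slice_idle is 8ms): eight sync queues at default priority give an
expected latency of 8 * 100ms = 800ms, above the 300ms target, so each
slice shrinks to 100 * 300 / 800 = 37ms, still over the 2 * 8 = 16ms floor.
A minimal user-space sketch mirroring the cfq_set_prio_slice() hunk below
(scaled_slice is an illustrative name; HZ = 1000 is our assumption):

#define HZ 1000	/* assume 1 jiffy == 1 ms */

static const unsigned sync_slice     = HZ / 10;   /* cfq_slice_sync: 100 ms */
static const unsigned slice_idle     = HZ / 125;  /* cfq_slice_idle:   8 ms */
static const unsigned target_latency = HZ * 3/10; /* 300 ms */

/* Adapt a queue's base slice to iq, the averaged number of queues
 * in its priority class. */
static unsigned scaled_slice(unsigned slice, unsigned iq)
{
	unsigned expect_latency = sync_slice * iq;

	if (expect_latency > target_latency) {
		/* minimum slice, scaled by priority and sync vs async */
		unsigned base_low_slice = 2 * slice_idle;
		unsigned low_slice = base_low_slice * slice / sync_slice;

		if (low_slice > slice)
			low_slice = slice;
		/* shrink so that iq such slices fit the target latency */
		slice = slice * target_latency / expect_latency;
		if (slice < low_slice)
			slice = low_slice;
	}
	return slice;
}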

Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Corrado Zoccolo authored and Jens Axboe committed Oct 28, 2009
1 parent 1a1238a commit 5db5d64
Showing 1 changed file with 51 additions and 2 deletions.
block/cfq-iosched.c
@@ -27,6 +27,8 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
+static const int cfq_hist_divisor = 4;
 
 /*
  * offset from end of service tree
@@ -148,6 +150,8 @@ struct cfq_data {
 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
 	unsigned int busy_queues;
+	unsigned int busy_rt_queues;
+	unsigned int busy_queues_avg[2];
 
 	int rq_in_driver[2];
 	int sync_flight;
@@ -315,10 +319,52 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 }
 
+/*
+ * get averaged number of queues of RT/BE priority.
+ * average is updated, with a formula that gives more weight to higher numbers,
+ * to quickly follow sudden increases and decrease slowly
+ */
+
+static inline unsigned
+cfq_get_avg_queues(struct cfq_data *cfqd, bool rt) {
+	unsigned min_q, max_q;
+	unsigned mult = cfq_hist_divisor - 1;
+	unsigned round = cfq_hist_divisor / 2;
+	unsigned busy = cfqd->busy_rt_queues;
+
+	if (!rt)
+		busy = cfqd->busy_queues - cfqd->busy_rt_queues;
+
+	min_q = min(cfqd->busy_queues_avg[rt], busy);
+	max_q = max(cfqd->busy_queues_avg[rt], busy);
+	cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
+		cfq_hist_divisor;
+	return cfqd->busy_queues_avg[rt];
+}
+
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
+	if (cfqd->cfq_latency) {
+		/* interested queues (we consider only the ones with the same
+		 * priority class) */
+		unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
+		unsigned sync_slice = cfqd->cfq_slice[1];
+		unsigned expect_latency = sync_slice * iq;
+		if (expect_latency > cfq_target_latency) {
+			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
+			/* scale low_slice according to IO priority
+			 * and sync vs async */
+			unsigned low_slice =
+				min(slice, base_low_slice * slice / sync_slice);
+			/* the adapted slice value is scaled to fit all iqs
+			 * into the target latency */
+			slice = max(slice * cfq_target_latency / expect_latency,
+				    low_slice);
+		}
+	}
+	cfqq->slice_end = jiffies + slice;
 	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 }
 
@@ -669,7 +715,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
-
+	if (cfq_class_rt(cfqq))
+		cfqd->busy_rt_queues++;
 	cfq_resort_rr_list(cfqd, cfqq);
 }
 
@@ -692,6 +739,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
+	if (cfq_class_rt(cfqq))
+		cfqd->busy_rt_queues--;
 }
 
 /*
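
To see the two mechanisms interact, here is a small self-contained
user-space simulation (an illustrative sketch under the same HZ = 1000
assumption; none of these names exist in the kernel). It feeds a bursty
busy-queue count through the averaging step and prints the adapted slice:

#include <stdio.h>

#define HZ 1000	/* assume 1 jiffy == 1 ms */

static const unsigned sync_slice     = HZ / 10;   /* 100 ms */
static const unsigned slice_idle     = HZ / 125;  /*   8 ms */
static const unsigned target_latency = HZ * 3/10; /* 300 ms */
static const unsigned hist_divisor   = 4;

static unsigned avg;	/* busy_queues_avg for one priority class */

/* running average: follow increases quickly, decay slowly */
static unsigned avg_step(unsigned busy)
{
	unsigned min_q = avg < busy ? avg : busy;
	unsigned max_q = avg > busy ? avg : busy;

	avg = ((hist_divisor - 1) * max_q + min_q + hist_divisor / 2) /
		hist_divisor;
	return avg;
}

/* clamp the base slice so iq slices fit the target latency */
static unsigned scaled_slice(unsigned slice, unsigned iq)
{
	unsigned expect_latency = sync_slice * iq;

	if (expect_latency > target_latency) {
		unsigned low_slice = 2 * slice_idle * slice / sync_slice;

		if (low_slice > slice)
			low_slice = slice;
		slice = slice * target_latency / expect_latency;
		if (slice < low_slice)
			slice = low_slice;
	}
	return slice;
}

int main(void)
{
	/* a burst of nine concurrent sync readers, then a quiet period */
	unsigned busy[] = { 1, 9, 9, 9, 1, 1, 1, 1 };
	unsigned i;

	for (i = 0; i < sizeof(busy) / sizeof(busy[0]); i++) {
		unsigned iq = avg_step(busy[i]);

		printf("busy=%u avg=%u slice=%u ms\n",
		       busy[i], iq, scaled_slice(sync_slice, iq));
	}
	return 0;
}

The slice drops from 100ms to about 33ms while nine readers compete,
then recovers over several updates once the burst ends.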
