dm cache policy mq: tweak algorithm that decides when to promote a block
Rather than maintaining a separate promote_threshold variable that we
periodically update, we now use the hit count of the oldest clean
block.  Also add a fudge factor to discourage demoting dirty blocks.

In some tests this makes a sizeable difference, because the old code
was too eager to demote blocks.  For example, device-mapper-test-suite's
git_extract_cache_quick test goes from taking 190 seconds to 142
(linear on spindle takes 250).

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
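
For intuition, here is a minimal user-space sketch of the new rule; the
struct and parameter names are simplified stand-ins for the mq policy's
internals, not the kernel's API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a queued cache entry. */
struct entry {
    unsigned hit_count;
};

#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128

/*
 * The bar for promotion is the hit count of the coldest clean block;
 * if only dirty blocks remain, add a fudge factor so we are reluctant
 * to demote them (a dirty block must be written back first).
 */
static unsigned promote_threshold(bool cache_has_free_blocks,
                                  const struct entry *coldest_clean,
                                  const struct entry *coldest_dirty)
{
    if (cache_has_free_blocks)
        return 0;    /* promotion displaces nothing, always take it */

    if (coldest_clean)
        return coldest_clean->hit_count;

    if (coldest_dirty)
        return coldest_dirty->hit_count +
               DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

    return 0;    /* cache full yet both queues empty: shouldn't happen */
}

int main(void)
{
    struct entry clean = { 10 }, dirty = { 10 };

    /* Against a clean victim the bar is just its hit count... */
    printf("%u\n", promote_threshold(false, &clean, &dirty));   /* 10 */

    /* ...against a dirty victim it is 128 hits higher. */
    printf("%u\n", promote_threshold(false, NULL, &dirty));     /* 138 */
    return 0;
}

The old scheme instead averaged hit counts over a sample of cache
entries once per generation; tying the threshold directly to the
would-be victim makes each promotion exactly as hard as the demotion
it would cause.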
Joe Thornber authored and Mike Snitzer committed Nov 10, 2014
1 parent 41abc4e commit b155aa0
Showing 2 changed files with 53 additions and 28 deletions.
6 changes: 3 additions & 3 deletions Documentation/device-mapper/cache-policies.txt
@@ -58,9 +58,9 @@ since spindles tend to have good bandwidth. The io_tracker counts
contiguous I/Os to try to spot when the io is in one of these sequential
modes.

-Internally the mq policy maintains a promotion threshold variable. If
-the hit count of a block not in the cache goes above this threshold it
-gets promoted to the cache. The read, write and discard promote adjustment
+Internally the mq policy determines a promotion threshold. If the hit
+count of a block not in the cache goes above this threshold it gets
+promoted to the cache. The read, write and discard promote adjustment
tunables allow you to tweak the promotion threshold by adding a small
value based on the io type. They default to 4, 8 and 1 respectively.
If you're trying to quickly warm a new cache device you may wish to
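
As a worked example of those defaults, suppose the current promotion
threshold is 10 (an illustrative figure, not one taken from this patch):
a block being read is promoted once its hit count goes above 10 + 4 = 14,
a written block above 10 + 8 = 18, and a recently discarded block above
10 + 1 = 11, so discarded blocks are promoted most readily and written
blocks least.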
75 changes: 50 additions & 25 deletions drivers/md/dm-cache-policy-mq.c
@@ -181,24 +181,30 @@ static void queue_shift_down(struct queue *q)
 * Gives us the oldest entry of the lowest popoulated level. If the first
 * level is emptied then we shift down one level.
 */
-static struct list_head *queue_pop(struct queue *q)
+static struct list_head *queue_peek(struct queue *q)
{
    unsigned level;
-    struct list_head *r;

    for (level = 0; level < NR_QUEUE_LEVELS; level++)
-        if (!list_empty(q->qs + level)) {
-            r = q->qs[level].next;
-            list_del(r);
+        if (!list_empty(q->qs + level))
+            return q->qs[level].next;

-            /* have we just emptied the bottom level? */
-            if (level == 0 && list_empty(q->qs))
-                queue_shift_down(q);
+    return NULL;
+}

-            return r;
-        }
+static struct list_head *queue_pop(struct queue *q)
+{
+    struct list_head *r = queue_peek(q);

-    return NULL;
+    if (r) {
+        list_del(r);
+
+        /* have we just emptied the bottom level? */
+        if (list_empty(q->qs))
+            queue_shift_down(q);
+    }
+
+    return r;
}

static struct list_head *list_pop(struct list_head *lh)
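
The net effect of the hunk above is to split the old destructive
queue_pop() into a non-destructive queue_peek() plus a queue_pop()
layered on top of it, so the policy can inspect the coldest entry
without dequeuing it. Below is a toy user-space model of that split; it
uses plain singly-linked lists instead of the kernel's struct list_head
and omits the level shift-down, so the types are stand-ins even though
the function names mirror the patch:

#include <stdio.h>

#define NR_QUEUE_LEVELS 4

struct node {
    int value;
    struct node *next;
};

struct queue {
    struct node *qs[NR_QUEUE_LEVELS];    /* FIFO heads, oldest first */
};

/* Oldest entry of the lowest populated level, left in place. */
static struct node *queue_peek(struct queue *q)
{
    unsigned level;

    for (level = 0; level < NR_QUEUE_LEVELS; level++)
        if (q->qs[level])
            return q->qs[level];

    return NULL;
}

/* Pop is now just peek plus unlink, mirroring the refactor above. */
static struct node *queue_pop(struct queue *q)
{
    struct node *r = queue_peek(q);
    unsigned level;

    if (r)
        for (level = 0; level < NR_QUEUE_LEVELS; level++)
            if (q->qs[level] == r) {
                q->qs[level] = r->next;
                break;
            }

    return r;
}

int main(void)
{
    struct node a = { 7, NULL };
    struct queue q = { { NULL, &a, NULL, NULL } };

    printf("peek: %d\n", queue_peek(&q)->value);    /* 7, still queued */
    printf("pop:  %d\n", queue_pop(&q)->value);     /* 7, now removed */
    printf("empty: %s\n", queue_peek(&q) ? "no" : "yes");
    return 0;
}

This separation is what lets the new promote_threshold() further down
read the hit count of the coldest clean or dirty block without
disturbing the queues.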
@@ -383,13 +389,6 @@ struct mq_policy {
    unsigned generation;
    unsigned generation_period; /* in lookups (will probably change) */

-    /*
-     * Entries in the pre_cache whose hit count passes the promotion
-     * threshold move to the cache proper. Working out the correct
-     * value for the promotion_threshold is crucial to this policy.
-     */
-    unsigned promote_threshold;
-
    unsigned discard_promote_adjustment;
    unsigned read_promote_adjustment;
    unsigned write_promote_adjustment;
@@ -406,6 +405,7 @@ struct mq_policy {
#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
+#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128

/*----------------------------------------------------------------*/

@@ -518,6 +518,12 @@ static struct entry *pop(struct mq_policy *mq, struct queue *q)
    return e;
}

+static struct entry *peek(struct queue *q)
+{
+    struct list_head *h = queue_peek(q);
+    return h ? container_of(h, struct entry, list) : NULL;
+}
+
/*
 * Has this entry already been updated?
 */
@@ -570,10 +576,6 @@ static void check_generation(struct mq_policy *mq)
                break;
            }
        }
-
-        mq->promote_threshold = nr ? total / nr : 1;
-        if (mq->promote_threshold * nr < total)
-            mq->promote_threshold++;
    }
}

@@ -640,6 +642,30 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
    return 0;
}

+/*
+ * Entries in the pre_cache whose hit count passes the promotion
+ * threshold move to the cache proper. Working out the correct
+ * value for the promotion_threshold is crucial to this policy.
+ */
+static unsigned promote_threshold(struct mq_policy *mq)
+{
+    struct entry *e;
+
+    if (any_free_cblocks(mq))
+        return 0;
+
+    e = peek(&mq->cache_clean);
+    if (e)
+        return e->hit_count;
+
+    e = peek(&mq->cache_dirty);
+    if (e)
+        return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;
+
+    /* This should never happen */
+    return 0;
+}
+
/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
@@ -653,7 +679,7 @@ static unsigned adjusted_promote_threshold(struct mq_policy *mq,
                                           bool discarded_oblock, int data_dir)
{
    if (data_dir == READ)
-        return mq->promote_threshold + mq->read_promote_adjustment;
+        return promote_threshold(mq) + mq->read_promote_adjustment;

    if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
        /*
@@ -663,7 +689,7 @@ static unsigned adjusted_promote_threshold(struct mq_policy *mq,
        return mq->discard_promote_adjustment;
    }

-    return mq->promote_threshold + mq->write_promote_adjustment;
+    return promote_threshold(mq) + mq->write_promote_adjustment;
}

static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -1230,7 +1256,6 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
    mq->tick = 0;
    mq->hit_count = 0;
    mq->generation = 0;
-    mq->promote_threshold = 0;
    mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
    mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
    mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
