Merge tag 'dm-3.10-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm

Pull device-mapper updates from Alasdair Kergon:
 "Allow devices that hold metadata for the device-mapper thin
  provisioning target to be extended easily; allow WRITE SAME on
  multipath devices; an assortment of little fixes and clean-ups."

* tag 'dm-3.10-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm: (21 commits)
  dm cache: set config value
  dm cache: move config fns
  dm thin: generate event when metadata threshold passed
  dm persistent metadata: add space map threshold callback
  dm persistent data: add threshold callback to space map
  dm thin: detect metadata device resizing
  dm persistent data: support space map resizing
  dm thin: open dev read only when possible
  dm thin: refactor data dev resize
  dm cache: replace memcpy with struct assignment
  dm cache: fix typos in comments
  dm cache policy: fix description of lookup fn
  dm: document iterate_devices
  dm persistent data: fix error message typos
  dm cache: tune migration throttling
  dm mpath: enable WRITE SAME support
  dm table: fix write same support
  dm bufio: avoid a possible __vmalloc deadlock
  dm snapshot: fix error return code in snapshot_ctr
  dm cache: fix error return code in cache_create
  ...
Linus Torvalds committed May 10, 2013
2 parents f755407 + 2f14f4b commit ec66715
Showing 15 changed files with 442 additions and 116 deletions.
24 changes: 23 additions & 1 deletion drivers/md/dm-bufio.c
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
enum data_mode *data_mode)
{
unsigned noio_flag;
void *ptr;

if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
*data_mode = DATA_MODE_SLAB;
return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
}

*data_mode = DATA_MODE_VMALLOC;
return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

/*
* __vmalloc allocates the data pages and auxiliary structures with
* gfp_flags that were specified, but pagetables are always allocated
* with GFP_KERNEL, no matter what was specified as gfp_mask.
*
* Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
* all allocations done by this process (including pagetables) are done
* as if GFP_NOIO was specified.
*/

if (gfp_mask & __GFP_NORETRY)
noio_flag = memalloc_noio_save();

ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

if (gfp_mask & __GFP_NORETRY)
memalloc_noio_restore(noio_flag);

return ptr;
}

/*
4 changes: 2 additions & 2 deletions drivers/md/dm-cache-metadata.c
@@ -1044,15 +1044,15 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
down_read(&cmd->root_lock);
memcpy(stats, &cmd->stats, sizeof(*stats));
*stats = cmd->stats;
up_read(&cmd->root_lock);
}

void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
down_write(&cmd->root_lock);
memcpy(&cmd->stats, stats, sizeof(*stats));
cmd->stats = *stats;
up_write(&cmd->root_lock);
}

4 changes: 2 additions & 2 deletions drivers/md/dm-cache-policy.h
@@ -130,8 +130,8 @@ struct dm_cache_policy {
*
* Must not block.
*
* Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
* would be typical).
* Returns 0 if in cache, -ENOENT if not, < 0 for other errors
* (-EWOULDBLOCK would be typical).
*/
int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);

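The corrected comment documents the lookup contract callers rely on: 0 for a cache hit, -ENOENT for a miss, and another negative errno (typically -EWOULDBLOCK) when the policy cannot answer without blocking. For illustration only, a minimal kernel-style sketch of a hypothetical caller consuming those return values; only struct dm_cache_policy, dm_oblock_t and dm_cblock_t come from the header above, the rest is assumed:

#include <linux/errno.h>

#include "dm-cache-policy.h"

/* Hypothetical caller, for illustration only -- not part of this commit. */
static void example_lookup(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        dm_cblock_t cblock;
        int r = p->lookup(p, oblock, &cblock);

        switch (r) {
        case 0:
                /* hit: cblock now names the cache block to remap to */
                break;
        case -ENOENT:
                /* miss: service the I/O from the origin device instead */
                break;
        default:
                /* other error, typically -EWOULDBLOCK: defer and retry later */
                break;
        }
}
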
100 changes: 53 additions & 47 deletions drivers/md/dm-cache-target.c
@@ -205,7 +205,7 @@ struct per_bio_data {
/*
* writethrough fields. These MUST remain at the end of this
* structure and the 'cache' member must be the first as it
* is used to determine the offsetof the writethrough fields.
* is used to determine the offset of the writethrough fields.
*/
struct cache *cache;
dm_cblock_t cblock;
@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
return r;
}

/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)

/*
* We can't issue this bio directly, since we're in interrupt
* context. So it get's put on a bio list for processing by the
* context. So it gets put on a bio list for processing by the
* worker thread.
*/
defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
policy_tick(cache->policy);
wake_worker(cache);
queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,

static struct kmem_cache *migration_cache;

static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, const char *key, const char *value)
{
unsigned long tmp;

if (!strcasecmp(key, "migration_threshold")) {
if (kstrtoul(value, 10, &tmp))
return -EINVAL;

cache->migration_threshold = tmp;
return 0;
}

return NOT_CORE_OPTION;
}

static int set_config_value(struct cache *cache, const char *key, const char *value)
{
int r = process_config_option(cache, key, value);

if (r == NOT_CORE_OPTION)
r = policy_set_config_value(cache->policy, key, value);

if (r)
DMWARN("bad config value for %s: %s", key, value);

return r;
}

static int set_config_values(struct cache *cache, int argc, const char **argv)
{
int r = 0;

@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
}

while (argc) {
r = policy_set_config_value(p, argv[0], argv[1]);
if (r) {
DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
argv[0], argv[1]);
return r;
}
r = set_config_value(cache, argv[0], argv[1]);
if (r)
break;

argc -= 2;
argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
char **error)
{
int r;

cache->policy = dm_cache_policy_create(ca->policy_name,
cache->cache_size,
cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
return -ENOMEM;
}

r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
if (r) {
*error = "Error setting cache policy's config values";
dm_cache_policy_destroy(cache->policy);
cache->policy = NULL;
}

return r;
return 0;
}

/*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
return discard_block_size;
}

#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
#define DEFAULT_MIGRATION_THRESHOLD 2048

static int cache_create(struct cache_args *ca, struct cache **result)
{
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discards_supported = true;
ti->discard_zeroes_data_unsupported = true;

memcpy(&cache->features, &ca->features, sizeof(cache->features));
cache->features = ca->features;
ti->per_bio_data_size = get_per_bio_data_size(cache);

cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
r = create_cache_policy(cache, ca, error);
if (r)
goto bad;

cache->policy_nr_args = ca->policy_argc;
cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
if (r) {
*error = "Error setting cache policy's config values";
goto bad;
}

cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);

r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
if (!cache->dirty_bitset) {
@@ -2517,41 +2545,19 @@ static void cache_status(struct dm_target *ti, status_type_t type,
DMEMIT("Error");
}

#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, char **argv)
{
unsigned long tmp;

if (!strcasecmp(argv[0], "migration_threshold")) {
if (kstrtoul(argv[1], 10, &tmp))
return -EINVAL;

cache->migration_threshold = tmp;
return 0;
}

return NOT_CORE_OPTION;
}

/*
* Supports <key> <value>.
*
* The key migration_threshold is supported by the cache target core.
*/
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
int r;
struct cache *cache = ti->private;

if (argc != 2)
return -EINVAL;

r = process_config_option(cache, argv);
if (r == NOT_CORE_OPTION)
return policy_set_config_value(cache->policy, argv[0], argv[1]);

return r;
return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)

static struct target_type cache_target = {
.name = "cache",
.version = {1, 1, 0},
.version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
1 change: 1 addition & 0 deletions drivers/md/dm-mpath.c
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,

ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;

return 0;

1 change: 1 addition & 0 deletions drivers/md/dm-snap.c
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
if (!s->pending_pool) {
ti->error = "Could not allocate mempool for pending exceptions";
r = -ENOMEM;
goto bad_pending_pool;
}

11 changes: 6 additions & 5 deletions drivers/md/dm-stripe.c
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct stripe_c *sc;
sector_t width;
sector_t width, tmp_len;
uint32_t stripes;
uint32_t chunk_size;
int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}

width = ti->len;
if (sector_div(width, chunk_size)) {
if (sector_div(width, stripes)) {
ti->error = "Target length not divisible by "
"chunk size";
"number of stripes";
return -EINVAL;
}

if (sector_div(width, stripes)) {
tmp_len = width;
if (sector_div(tmp_len, chunk_size)) {
ti->error = "Target length not divisible by "
"number of stripes";
"chunk size";
return -EINVAL;
}

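The stripe constructor fix above swaps the two divisibility checks so that each error message matches its test: the target length must first split evenly across the stripes, and the resulting per-stripe width must then be a whole number of chunks. As a worked illustration (not part of the commit), a hypothetical helper mirroring the corrected order, assuming the usual in-place sector_div() semantics:

#include <linux/types.h>
#include <linux/blkdev.h>       /* sector_div() */

/* Hypothetical helper, for illustration only -- not from this commit. */
static bool stripe_geometry_valid(sector_t len, uint32_t stripes,
                                  uint32_t chunk_size)
{
        sector_t width = len;

        /* e.g. len = 1024 sectors, stripes = 4: width becomes 256, remainder 0 */
        if (sector_div(width, stripes))
                return false;   /* length does not divide across the stripes */

        /* e.g. width = 256, chunk_size = 128: remainder 0, geometry is valid */
        if (sector_div(width, chunk_size))
                return false;   /* per-stripe width is not a whole number of chunks */

        return true;
}
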
2 changes: 1 addition & 1 deletion drivers/md/dm-table.c
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
return false;

if (!ti->type->iterate_devices ||
!ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
return false;
}

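The one-character fix above removes a stray negation: device_not_write_same_capable() is a per-device predicate expected to return nonzero for a device that cannot handle WRITE SAME, and iterate_devices() reports nonzero if any callback does, so the table must refuse WRITE SAME exactly when that call returns nonzero. The callback body is not shown in this hunk; a sketch of its likely shape, for illustration only:

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

/*
 * Sketch only -- the real callback lives elsewhere in dm-table.c and is not
 * part of this hunk. Returning nonzero for a device without WRITE SAME
 * support is what makes the un-negated iterate_devices() test correct.
 */
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
                                         sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && !q->limits.max_write_same_sectors;
}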
