dm crypt: optionally support larger encryption sector size
Add an optional "sector_size" parameter that specifies the encryption
sector size (the atomic unit of block device encryption).

The parameter can be in the range of 512 to 4096 bytes and must be a
power of two. For compatibility reasons, the maximal IO must fit into
the page limit, so the upper bound is set to the minimal possible page
size (4096 bytes).
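
As a rough illustration, the same constraint can be checked in user space
before building a table line. This stand-alone helper is not part of the
patch (the kernel code uses ilog2() for the power-of-two test; the bit
trick below is equivalent):

  #include <stdbool.h>
  #include <stdint.h>

  #define SECTOR_SHIFT 9 /* 512-byte sectors, as in the kernel */

  /* Mirrors the sector_size validation in crypt_ctr_optional():
   * the value must lie in [512, 4096] and be a power of two. */
  static bool sector_size_valid(uint32_t sector_size)
  {
          if (sector_size < (1u << SECTOR_SHIFT) || sector_size > 4096)
                  return false;
          /* a power of two has exactly one bit set */
          return (sector_size & (sector_size - 1)) == 0;
  }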

NOTE: this device cannot yet be handled by cryptsetup if this parameter
is set.

The IV for a sector is calculated from the 512-byte sector offset unless
the iv_large_sectors option is used.
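
A minimal sketch of that IV indexing, assuming cc_sector counts 512-byte
sectors (the helper name is illustrative, not kernel API):

  #include <stdbool.h>
  #include <stdint.h>

  #define SECTOR_SHIFT 9

  /* Sketch of the IV indexing in the convert path: without the flag
   * the IV is indexed by 512-byte sector number; with it, the index
   * is counted in sector_size units. */
  static uint64_t iv_sector(uint64_t cc_sector, uint32_t sector_size,
                            bool iv_large_sectors)
  {
          if (iv_large_sectors)
                  return cc_sector / (sector_size >> SECTOR_SHIFT);
          return cc_sector;
  }

For example, with a 4096-byte sector_size the second encryption block
starts at 512-byte sector 8: iv_sector(8, 4096, false) yields 8, while
iv_sector(8, 4096, true) yields 1.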

Test script using dmsetup:

  DEV="/dev/sdb"
  DEV_SIZE=$(blockdev --getsz $DEV)
  KEY="9c1185a5c5e9fc54612808977ee8f548b2258d31ddadef707ba62c166051b9e3cd0294c27515f2bccee924e8823ca6e124b8fc3167ed478bca702babe4e130ac"
  BLOCK_SIZE=4096

  # dmsetup create test_crypt --table "0 $DEV_SIZE crypt aes-xts-plain64 $KEY 0 $DEV 0 1 sector_size:$BLOCK_SIZE"
  # dmsetup table --showkeys test_crypt
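
Internally, the conversion loop now advances in sector_size units rather
than one 512-byte sector at a time. A simplified sketch of that stepping
(not the actual kernel loop; encrypt_one_block() is a hypothetical
placeholder):

  #include <stdint.h>

  #define SECTOR_SHIFT 9

  /* Simplified sketch of the stepping in crypt_convert(): each pass
   * handles one sector_size chunk, and both the logical sector and
   * the integrity-tag index advance by sector_step 512-byte sectors. */
  static void convert_sketch(uint64_t cc_sector, uint32_t sector_size,
                             uint64_t total_bytes)
  {
          unsigned int sector_step = sector_size >> SECTOR_SHIFT;
          unsigned int tag_offset = 0;

          while (total_bytes) {
                  /* encrypt_one_block(cc_sector, tag_offset); */
                  cc_sector += sector_step;
                  tag_offset += sector_step;
                  total_bytes -= sector_size;
          }
  }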

Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Milan Broz authored and Mike Snitzer committed Mar 24, 2017
1 parent 33d2f09 commit 8f0009a
Showing 2 changed files with 96 additions and 23 deletions.
14 changes: 14 additions & 0 deletions Documentation/device-mapper/dm-crypt.txt
@@ -122,6 +122,20 @@ integrity:<bytes>:<type>
 	integrity for the encrypted device. The additional space is then
 	used for storing authentication tag (and persistent IV if needed).
 
+sector_size:<bytes>
+	Use <bytes> as the encryption unit instead of 512 bytes sectors.
+	This option can be in range 512 - 4096 bytes and must be power of two.
+	Virtual device will announce this size as a minimal IO and logical sector.
+
+iv_large_sectors
+	IV generators will use sector number counted in <sector_size> units
+	instead of default 512 bytes sectors.
+
+	For example, if <sector_size> is 4096 bytes, plain64 IV for the second
+	sector will be 8 (without flag) and 1 if iv_large_sectors is present.
+	The <iv_offset> must be multiple of <sector_size> (in 512 bytes units)
+	if this flag is specified.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
105 changes: 82 additions & 23 deletions drivers/md/dm-crypt.c
@@ -129,6 +129,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
 
 enum cipher_flags {
 	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
+	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
 };
 
 /*
@@ -171,6 +172,7 @@ struct crypt_config {
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
+	unsigned int sector_size;
 
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
@@ -524,6 +526,11 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
 {
 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
 
+	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+		ti->error = "Unsupported sector size for LMK";
+		return -EINVAL;
+	}
+
 	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
 	if (IS_ERR(lmk->hash_tfm)) {
 		ti->error = "Error initializing LMK hash";
@@ -677,6 +684,11 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
 {
 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
 
+	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+		ti->error = "Unsupported sector size for TCW";
+		return -EINVAL;
+	}
+
 	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
 		ti->error = "Wrong key size for TCW";
 		return -EINVAL;
@@ -1037,15 +1049,20 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
 	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct dm_crypt_request *dmreq;
-	unsigned int data_len = 1 << SECTOR_SHIFT;
 	u8 *iv, *org_iv, *tag_iv, *tag;
 	uint64_t *sector;
 	int r = 0;
 
 	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
 
+	/* Reject unexpected unaligned bio. */
+	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+		return -EIO;
+
 	dmreq = dmreq_of_req(cc, req);
 	dmreq->iv_sector = ctx->cc_sector;
+	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+		sector_div(dmreq->iv_sector, cc->sector_size >> SECTOR_SHIFT);
 	dmreq->ctx = ctx;
 
 	*org_tag_of_dmreq(cc, dmreq) = tag_offset;
@@ -1066,13 +1083,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	sg_init_table(dmreq->sg_in, 4);
 	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
 	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
-	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, data_len, bv_in.bv_offset);
+	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
 	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
 
 	sg_init_table(dmreq->sg_out, 4);
 	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
 	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
-	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, data_len, bv_out.bv_offset);
+	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
 	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
 
 	if (cc->iv_gen_ops) {
@@ -1094,14 +1111,14 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
 	if (bio_data_dir(ctx->bio_in) == WRITE) {
 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
-				       data_len, iv);
+				       cc->sector_size, iv);
 		r = crypto_aead_encrypt(req);
 		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
 			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
 			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
 	} else {
 		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
-				       data_len + cc->integrity_tag_size, iv);
+				       cc->sector_size + cc->integrity_tag_size, iv);
 		r = crypto_aead_decrypt(req);
 	}
 
@@ -1112,8 +1129,8 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
 
-	bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
-	bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
 
 	return r;
 }
@@ -1127,13 +1144,18 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct scatterlist *sg_in, *sg_out;
 	struct dm_crypt_request *dmreq;
-	unsigned int data_len = 1 << SECTOR_SHIFT;
 	u8 *iv, *org_iv, *tag_iv;
 	uint64_t *sector;
 	int r = 0;
 
+	/* Reject unexpected unaligned bio. */
+	if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+		return -EIO;
+
 	dmreq = dmreq_of_req(cc, req);
 	dmreq->iv_sector = ctx->cc_sector;
+	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+		sector_div(dmreq->iv_sector, cc->sector_size >> SECTOR_SHIFT);
 	dmreq->ctx = ctx;
 
 	*org_tag_of_dmreq(cc, dmreq) = tag_offset;
@@ -1150,10 +1172,10 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	sg_out = &dmreq->sg_out[0];
 
 	sg_init_table(sg_in, 1);
-	sg_set_page(sg_in, bv_in.bv_page, data_len, bv_in.bv_offset);
+	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
 
 	sg_init_table(sg_out, 1);
-	sg_set_page(sg_out, bv_out.bv_page, data_len, bv_out.bv_offset);
+	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
 
 	if (cc->iv_gen_ops) {
 		/* For READs use IV stored in integrity metadata */
@@ -1171,7 +1193,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 		memcpy(iv, org_iv, cc->iv_size);
 	}
 
-	skcipher_request_set_crypt(req, sg_in, sg_out, data_len, iv);
+	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
 
 	if (bio_data_dir(ctx->bio_in) == WRITE)
 		r = crypto_skcipher_encrypt(req);
@@ -1181,8 +1203,8 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
 
-	bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
-	bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
 
 	return r;
 }
@@ -1268,14 +1290,14 @@ static int crypt_convert(struct crypt_config *cc,
 			  struct convert_context *ctx)
 {
 	unsigned int tag_offset = 0;
+	unsigned int sector_step = cc->sector_size / (1 << SECTOR_SHIFT);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
 
 	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
 		crypt_alloc_req(cc, ctx);
-
 		atomic_inc(&ctx->cc_pending);
 
 		if (crypt_integrity_aead(cc))
@@ -1298,16 +1320,16 @@ static int crypt_convert(struct crypt_config *cc,
 		 */
 		case -EINPROGRESS:
 			ctx->r.req = NULL;
-			ctx->cc_sector++;
-			tag_offset++;
+			ctx->cc_sector += sector_step;
+			tag_offset += sector_step;
 			continue;
 		/*
 		 * The request was already processed (synchronously).
 		 */
 		case 0:
 			atomic_dec(&ctx->cc_pending);
-			ctx->cc_sector++;
-			tag_offset++;
+			ctx->cc_sector += sector_step;
+			tag_offset += sector_step;
 			cond_resched();
 			continue;
 		/*
@@ -2506,10 +2528,11 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
 	struct crypt_config *cc = ti->private;
 	struct dm_arg_set as;
 	static struct dm_arg _args[] = {
-		{0, 3, "Invalid number of feature args"},
+		{0, 6, "Invalid number of feature args"},
 	};
 	unsigned int opt_params, val;
 	const char *opt_string, *sval;
+	char dummy;
 	int ret;
 
 	/* Optional parameters */
@@ -2552,7 +2575,16 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
 			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
 			if (!cc->cipher_auth)
 				return -ENOMEM;
-		} else {
+		} else if (sscanf(opt_string, "sector_size:%u%c", &cc->sector_size, &dummy) == 1) {
+			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
+			    cc->sector_size > 4096 ||
+			    (1 << ilog2(cc->sector_size) != cc->sector_size)) {
+				ti->error = "Invalid feature value for sector_size";
+				return -EINVAL;
+			}
+		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
+			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
+		else {
 			ti->error = "Invalid feature arguments";
 			return -EINVAL;
 		}
@@ -2592,6 +2624,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		return -ENOMEM;
 	}
 	cc->key_size = key_size;
+	cc->sector_size = (1 << SECTOR_SHIFT);
 
 	ti->private = cc;
 
@@ -2664,7 +2697,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	mutex_init(&cc->bio_alloc_lock);
 
 	ret = -EINVAL;
-	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
+	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
+	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
 		ti->error = "Invalid iv_offset sector";
 		goto bad;
 	}
@@ -2765,19 +2799,30 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
 		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
 
+	/*
+	 * Ensure that bio is a multiple of internal sector encryption size
+	 * and is aligned to this size as defined in IO hints.
+	 */
+	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
+		return -EIO;
+
+	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
+		return -EIO;
+
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
 	if (cc->on_disk_tag_size) {
 		unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio);
 
 		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
-		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
+		    unlikely(!(io->integrity_metadata = kzalloc(tag_len,
 				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
 			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
 				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
 			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
 			io->integrity_metadata_from_pool = true;
+			memset(io->integrity_metadata, 0, cc->tag_pool_max_sectors * (1 << SECTOR_SHIFT));
 		}
 	}
 
@@ -2825,6 +2870,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		num_feature_args += !!ti->num_discard_bios;
 		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
 		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+		num_feature_args += (cc->sector_size != (1 << SECTOR_SHIFT)) ? 1 : 0;
+		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
 		if (cc->on_disk_tag_size)
 			num_feature_args++;
 		if (num_feature_args) {
@@ -2837,6 +2884,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 				DMEMIT(" submit_from_crypt_cpus");
 			if (cc->on_disk_tag_size)
 				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
+			if (cc->sector_size != (1 << SECTOR_SHIFT))
+				DMEMIT(" sector_size:%d", cc->sector_size);
+			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+				DMEMIT(" iv_large_sectors");
 		}
 
 		break;
@@ -2926,18 +2977,26 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
+	struct crypt_config *cc = ti->private;
+
 	/*
 	 * Unfortunate constraint that is required to avoid the potential
 	 * for exceeding underlying device's max_segments limits -- due to
 	 * crypt_alloc_buffer() possibly allocating pages for the encryption
 	 * bio that are not as physically contiguous as the original bio.
 	 */
 	limits->max_segment_size = PAGE_SIZE;
+
+	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+		limits->logical_block_size = cc->sector_size;
+		limits->physical_block_size = cc->sector_size;
+		blk_limits_io_min(limits, cc->sector_size);
+	}
 }
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 16, 0},
+	.version = {1, 17, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
