s390/crypto: cleanup and move the header with the cpacf definitions
The CPACF instructions are going to be used in KVM as well, so move the
defines and the inline functions from arch/s390/crypto/crypt_s390.h
to arch/s390/include/asm. Rename the header to cpacf.h and replace
the crypt_s390_xxx names with cpacf_xxx.

While we are at it, clean up the header as well. The encoding of the
CPACF operations is odd: there is an enum for each of the CPACF
instructions, with the hardware function code in the lower 8 bits of
each entry and a software-defined number for the CPACF instruction
in the upper 8 bits. Remove the superfluous software number and
replace the enums with simple defines.
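
For illustration, a minimal before/after sketch of the encoding. The
architected values (KM opcode 0xb92e, AES-128 function code 0x12,
decipher modifier bit 0x80) come from the MSA definition; the old
CRYPT_S390_KM tag value is reconstructed from memory of crypt_s390.h
and is an assumption for illustration only.

/* Old style (crypt_s390.h): software-defined number in the upper
 * byte, hardware function code in the lower byte of each entry.
 */
enum crypt_s390_km_func {
    KM_QUERY           = 0x0100 | 0x00,        /* 0x0100: assumed software tag */
    KM_AES_128_ENCRYPT = 0x0100 | 0x12,        /* 0x12: architected function code */
    KM_AES_128_DECRYPT = 0x0100 | 0x12 | 0x80, /* 0x80: decipher modifier bit */
};

/* New style (cpacf.h): the instruction is identified by its opcode
 * and the defines carry only the architected function code.
 */
#define CPACF_KM              0xb92e  /* Cipher Message opcode */
#define CPACF_KM_QUERY        0x00
#define CPACF_KM_AES_128_ENC  0x12
#define CPACF_KM_AES_128_DEC  (0x12 | 0x80)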

The crypt_s390_func_available() function tests for the presence
of a specific CPACF operation. The new name of the function is
cpacf_query and it works slightly differently than before: it gets
passed the opcode of a CPACF instruction and a function code for
this instruction. The facility_mask parameter is gone; the opcode
is used to find the correct MSA facility bit to check whether the
CPACF instruction itself is available. If it is, the query function
of the given instruction is used to test whether the requested CPACF
operation is present.
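
A minimal usage sketch of the new interface, modeled on the
aes_s390_init() hunk below; the probe_aes() wrapper itself is
hypothetical and only shows the calling convention.

#include <linux/errno.h>
#include <linux/init.h>
#include <asm/cpacf.h>

static int __init probe_aes(void)
{
    /* One call replaces the old function-code + facility_mask pair:
     * cpacf_query() first tests the MSA facility bit that belongs to
     * the CPACF_KM opcode, then executes KM's query function to see
     * whether the AES-128 function code is offered.
     */
    if (!cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
        return -EOPNOTSUPP;
    return 0;
}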

Acked-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky committed Apr 15, 2016
1 parent f9dc447 commit c7d4d25
Showing 10 changed files with 556 additions and 660 deletions.
117 changes: 56 additions & 61 deletions arch/s390/crypto/aes_s390.c
@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <crypto/xts.h>
-#include "crypt_s390.h"
+#include <asm/cpacf.h>
 
 #define AES_KEYLEN_128 1
 #define AES_KEYLEN_192 2
@@ -145,16 +145,16 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
     switch (sctx->key_len) {
     case 16:
-        crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
-                      AES_BLOCK_SIZE);
+        cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
+                 AES_BLOCK_SIZE);
         break;
     case 24:
-        crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
-                      AES_BLOCK_SIZE);
+        cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
+                 AES_BLOCK_SIZE);
         break;
     case 32:
-        crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
-                      AES_BLOCK_SIZE);
+        cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
+                 AES_BLOCK_SIZE);
         break;
     }
 }
@@ -170,16 +170,16 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
     switch (sctx->key_len) {
     case 16:
-        crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
-                      AES_BLOCK_SIZE);
+        cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
+                 AES_BLOCK_SIZE);
         break;
     case 24:
-        crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
-                      AES_BLOCK_SIZE);
+        cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
+                 AES_BLOCK_SIZE);
         break;
     case 32:
-        crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
-                      AES_BLOCK_SIZE);
+        cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
+                 AES_BLOCK_SIZE);
         break;
     }
 }
@@ -212,7 +212,7 @@ static void fallback_exit_cip(struct crypto_tfm *tfm)
 static struct crypto_alg aes_alg = {
     .cra_name = "aes",
     .cra_driver_name = "aes-s390",
-    .cra_priority = CRYPT_S390_PRIORITY,
+    .cra_priority = 300,
     .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
                  CRYPTO_ALG_NEED_FALLBACK,
     .cra_blocksize = AES_BLOCK_SIZE,
@@ -298,16 +298,16 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
     switch (key_len) {
     case 16:
-        sctx->enc = KM_AES_128_ENCRYPT;
-        sctx->dec = KM_AES_128_DECRYPT;
+        sctx->enc = CPACF_KM_AES_128_ENC;
+        sctx->dec = CPACF_KM_AES_128_DEC;
         break;
     case 24:
-        sctx->enc = KM_AES_192_ENCRYPT;
-        sctx->dec = KM_AES_192_DECRYPT;
+        sctx->enc = CPACF_KM_AES_192_ENC;
+        sctx->dec = CPACF_KM_AES_192_DEC;
         break;
     case 32:
-        sctx->enc = KM_AES_256_ENCRYPT;
-        sctx->dec = KM_AES_256_DECRYPT;
+        sctx->enc = CPACF_KM_AES_256_ENC;
+        sctx->dec = CPACF_KM_AES_256_DEC;
         break;
     }
 
@@ -326,7 +326,7 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
         u8 *out = walk->dst.virt.addr;
         u8 *in = walk->src.virt.addr;
 
-        ret = crypt_s390_km(func, param, out, in, n);
+        ret = cpacf_km(func, param, out, in, n);
         if (ret < 0 || ret != n)
             return -EIO;
 
@@ -393,7 +393,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
 static struct crypto_alg ecb_aes_alg = {
     .cra_name = "ecb(aes)",
     .cra_driver_name = "ecb-aes-s390",
-    .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+    .cra_priority = 400,	/* combo: aes + ecb */
     .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                  CRYPTO_ALG_NEED_FALLBACK,
     .cra_blocksize = AES_BLOCK_SIZE,
@@ -427,16 +427,16 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
     switch (key_len) {
     case 16:
-        sctx->enc = KMC_AES_128_ENCRYPT;
-        sctx->dec = KMC_AES_128_DECRYPT;
+        sctx->enc = CPACF_KMC_AES_128_ENC;
+        sctx->dec = CPACF_KMC_AES_128_DEC;
         break;
     case 24:
-        sctx->enc = KMC_AES_192_ENCRYPT;
-        sctx->dec = KMC_AES_192_DECRYPT;
+        sctx->enc = CPACF_KMC_AES_192_ENC;
+        sctx->dec = CPACF_KMC_AES_192_DEC;
         break;
     case 32:
-        sctx->enc = KMC_AES_256_ENCRYPT;
-        sctx->dec = KMC_AES_256_DECRYPT;
+        sctx->enc = CPACF_KMC_AES_256_ENC;
+        sctx->dec = CPACF_KMC_AES_256_DEC;
         break;
     }
 
@@ -465,7 +465,7 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
         u8 *out = walk->dst.virt.addr;
         u8 *in = walk->src.virt.addr;
 
-        ret = crypt_s390_kmc(func, &param, out, in, n);
+        ret = cpacf_kmc(func, &param, out, in, n);
         if (ret < 0 || ret != n)
             return -EIO;
 
@@ -509,7 +509,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 static struct crypto_alg cbc_aes_alg = {
     .cra_name = "cbc(aes)",
     .cra_driver_name = "cbc-aes-s390",
-    .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+    .cra_priority = 400,	/* combo: aes + cbc */
     .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                  CRYPTO_ALG_NEED_FALLBACK,
     .cra_blocksize = AES_BLOCK_SIZE,
@@ -596,8 +596,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
     switch (key_len) {
     case 32:
-        xts_ctx->enc = KM_XTS_128_ENCRYPT;
-        xts_ctx->dec = KM_XTS_128_DECRYPT;
+        xts_ctx->enc = CPACF_KM_XTS_128_ENC;
+        xts_ctx->dec = CPACF_KM_XTS_128_DEC;
         memcpy(xts_ctx->key + 16, in_key, 16);
         memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
         break;
@@ -607,8 +607,8 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
         xts_fallback_setkey(tfm, in_key, key_len);
         break;
     case 64:
-        xts_ctx->enc = KM_XTS_256_ENCRYPT;
-        xts_ctx->dec = KM_XTS_256_DECRYPT;
+        xts_ctx->enc = CPACF_KM_XTS_256_ENC;
+        xts_ctx->dec = CPACF_KM_XTS_256_DEC;
         memcpy(xts_ctx->key, in_key, 32);
         memcpy(xts_ctx->pcc_key, in_key + 32, 32);
         break;
@@ -643,7 +643,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
     memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
     memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
     memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
-    ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+    /* remove decipher modifier bit from 'func' and call PCC */
+    ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
     if (ret < 0)
         return -EIO;
 
@@ -655,7 +656,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
         out = walk->dst.virt.addr;
         in = walk->src.virt.addr;
 
-        ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+        ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
         if (ret < 0 || ret != n)
             return -EIO;
 
@@ -721,7 +722,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
 static struct crypto_alg xts_aes_alg = {
     .cra_name = "xts(aes)",
     .cra_driver_name = "xts-aes-s390",
-    .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+    .cra_priority = 400,	/* combo: aes + xts */
     .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                  CRYPTO_ALG_NEED_FALLBACK,
     .cra_blocksize = AES_BLOCK_SIZE,
@@ -751,16 +752,16 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
     switch (key_len) {
     case 16:
-        sctx->enc = KMCTR_AES_128_ENCRYPT;
-        sctx->dec = KMCTR_AES_128_DECRYPT;
+        sctx->enc = CPACF_KMCTR_AES_128_ENC;
+        sctx->dec = CPACF_KMCTR_AES_128_DEC;
         break;
     case 24:
-        sctx->enc = KMCTR_AES_192_ENCRYPT;
-        sctx->dec = KMCTR_AES_192_DECRYPT;
+        sctx->enc = CPACF_KMCTR_AES_192_ENC;
+        sctx->dec = CPACF_KMCTR_AES_192_DEC;
         break;
     case 32:
-        sctx->enc = KMCTR_AES_256_ENCRYPT;
-        sctx->dec = KMCTR_AES_256_DECRYPT;
+        sctx->enc = CPACF_KMCTR_AES_256_ENC;
+        sctx->dec = CPACF_KMCTR_AES_256_DEC;
         break;
     }
 
@@ -804,8 +805,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
             n = __ctrblk_init(ctrptr, nbytes);
         else
             n = AES_BLOCK_SIZE;
-        ret = crypt_s390_kmctr(func, sctx->key, out, in,
-                               n, ctrptr);
+        ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
         if (ret < 0 || ret != n) {
             if (ctrptr == ctrblk)
                 spin_unlock(&ctrblk_lock);
@@ -837,8 +837,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
     if (nbytes) {
         out = walk->dst.virt.addr;
         in = walk->src.virt.addr;
-        ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-                               AES_BLOCK_SIZE, ctrbuf);
+        ret = cpacf_kmctr(func, sctx->key, buf, in,
+                          AES_BLOCK_SIZE, ctrbuf);
         if (ret < 0 || ret != AES_BLOCK_SIZE)
             return -EIO;
         memcpy(out, buf, nbytes);
@@ -875,7 +875,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
 static struct crypto_alg ctr_aes_alg = {
     .cra_name = "ctr(aes)",
     .cra_driver_name = "ctr-aes-s390",
-    .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
+    .cra_priority = 400,	/* combo: aes + ctr */
     .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
     .cra_blocksize = 1,
     .cra_ctxsize = sizeof(struct s390_aes_ctx),
@@ -899,11 +899,11 @@ static int __init aes_s390_init(void)
 {
     int ret;
 
-    if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
+    if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
         keylen_flag |= AES_KEYLEN_128;
-    if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
+    if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
         keylen_flag |= AES_KEYLEN_192;
-    if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
+    if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
         keylen_flag |= AES_KEYLEN_256;
 
     if (!keylen_flag)
Expand All @@ -926,22 +926,17 @@ static int __init aes_s390_init(void)
if (ret)
goto cbc_aes_err;

if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KM_XTS_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
ret = crypto_register_alg(&xts_aes_alg);
if (ret)
goto xts_aes_err;
xts_aes_alg_reg = 1;
}

if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
Expand Down