Skip to content

Commit

Permalink
crypto: x86/sha1_ssse3 - move SHA-1 SSSE3 implementation to base layer
Browse files Browse the repository at this point in the history
This removes all the boilerplate from the existing implementation,
and replaces it with calls into the base layer.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  • Loading branch information
Ard Biesheuvel authored and Herbert Xu committed Apr 10, 2015
1 parent 03802f6 commit 824b437
Showing 1 changed file with 28 additions and 111 deletions.
139 changes: 28 additions & 111 deletions arch/x86/crypto/sha1_ssse3_glue.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <crypto/sha1_base.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
Expand All @@ -44,132 +44,51 @@ asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */

asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
unsigned int rounds);
unsigned int rounds);
#endif

static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);


/*
 * Initialize the SHA-1 hash state for a new digest operation.
 *
 * Loads the five standard SHA-1 initial hash values (SHA1_H0..SHA1_H4);
 * the compound-literal assignment implicitly zeroes the remaining
 * sha1_state members (count and buffer).
 *
 * Always returns 0.
 */
static int sha1_ssse3_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);

*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};

return 0;
}

/*
 * Core update path: run message bytes through the selected assembler
 * block transform (sha1_transform_asm).
 *
 * NOTE(review): every caller visible in this file invokes this between
 * kernel_fpu_begin()/kernel_fpu_end(), so the FPU is assumed to be
 * owned on entry.
 *
 * @partial: number of bytes already buffered in sctx->buffer from a
 *           previous call (i.e. the old sctx->count % SHA1_BLOCK_SIZE).
 *
 * Always returns 0.
 */
static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len, unsigned int partial)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int done = 0;

sctx->count += len;

/* First top up and consume a previously buffered partial block. */
if (partial) {
done = SHA1_BLOCK_SIZE - partial;
memcpy(sctx->buffer + partial, data, done);
sha1_transform_asm(sctx->state, sctx->buffer, 1);
}

/* Hash all remaining whole 64-byte blocks straight from 'data'. */
if (len - done >= SHA1_BLOCK_SIZE) {
const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

sha1_transform_asm(sctx->state, data + done, rounds);
done += rounds * SHA1_BLOCK_SIZE;
}

/* Stash any trailing partial block for the next update/final. */
memcpy(sctx->buffer, data + done, len - done);

return 0;
}
static void (*sha1_transform_asm)(u32 *, const char *, unsigned int);

static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
int res;

/* Handle the fast case right here */
if (partial + len < SHA1_BLOCK_SIZE) {
sctx->count += len;
memcpy(sctx->buffer + partial, data, len);
if (!irq_fpu_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return crypto_sha1_update(desc, data, len);

return 0;
}
/* make sure casting to sha1_block_fn() is safe */
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);

if (!irq_fpu_usable()) {
res = crypto_sha1_update(desc, data, len);
} else {
kernel_fpu_begin();
res = __sha1_ssse3_update(desc, data, len, partial);
kernel_fpu_end();
}

return res;
}


/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, padlen;
__be32 *dst = (__be32 *)out;
__be64 bits;
static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

bits = cpu_to_be64(sctx->count << 3);

/* Pad out to 56 mod 64 and append length */
index = sctx->count % SHA1_BLOCK_SIZE;
padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
if (!irq_fpu_usable()) {
crypto_sha1_update(desc, padding, padlen);
crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
} else {
kernel_fpu_begin();
/* We need to fill a whole block for __sha1_ssse3_update() */
if (padlen <= 56) {
sctx->count += padlen;
memcpy(sctx->buffer + index, padding, padlen);
} else {
__sha1_ssse3_update(desc, padding, padlen, index);
}
__sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
kernel_fpu_end();
}

/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(sctx->state[i]);

/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
kernel_fpu_begin();
sha1_base_do_update(desc, data, len,
(sha1_block_fn *)sha1_transform_asm);
kernel_fpu_end();

return 0;
}

static int sha1_ssse3_export(struct shash_desc *desc, void *out)
static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
if (!irq_fpu_usable())
return crypto_sha1_finup(desc, data, len, out);

memcpy(out, sctx, sizeof(*sctx));
kernel_fpu_begin();
if (len)
sha1_base_do_update(desc, data, len,
(sha1_block_fn *)sha1_transform_asm);
sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_asm);
kernel_fpu_end();

return 0;
return sha1_base_finish(desc, out);
}

static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);

memcpy(sctx, in, sizeof(*sctx));

return 0;
return sha1_ssse3_finup(desc, NULL, 0, out);
}

#ifdef CONFIG_AS_AVX2
Expand All @@ -186,13 +105,11 @@ static void sha1_apply_transform_avx2(u32 *digest, const char *data,

static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_ssse3_init,
.init = sha1_base_init,
.update = sha1_ssse3_update,
.final = sha1_ssse3_final,
.export = sha1_ssse3_export,
.import = sha1_ssse3_import,
.finup = sha1_ssse3_finup,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ssse3",
Expand Down

0 comments on commit 824b437

Please sign in to comment.