crypto: twofish-avx - tune assembler code for more performance
This patch replaces 'movb' instructions with 'movzbl' to break false register
dependencies, and interleaves instructions better for out-of-order scheduling.
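
For illustration only -- this sketch is not part of the commit; the function
name and the 256-entry "table" are made up -- the movb-vs-movzbl difference
looks roughly like this in standalone form:

    /* movzbl_demo.S -- illustrative sketch, assumes a hypothetical
     * 256-entry table of 32-bit words named "table".                  */
    .text
    .globl  lookup_demo
    lookup_demo:
            /* Old pattern: the index register is zeroed once up front
             * (cf. the removed "xorq RID1, RID1") and afterwards only
             * its low byte is written.  The byte write merges into the
             * previous %rax value, so the dependent load below waits on
             * whatever last wrote %rax -- a false dependency.          */
            xorq    %rax, %rax
            movb    %dl, %al
            movl    table(,%rax,4), %ecx

            /* New pattern: movzbl writes the whole register (zero-
             * extended), letting register renaming break the chain so
             * the load can issue independently.                        */
            movzbl  %dl, %eax
            movl    table(,%rax,4), %ecx
            ret

    .section .rodata
    table:
            .fill   256, 4, 0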

Tested on Intel Core i5-2450M and AMD FX-8100.

tcrypt ECB results:

Intel Core i5-2450M:

size    old-vs-new      new-vs-3way     old-vs-3way
        enc     dec     enc     dec     enc     dec
256     1.12x   1.13x   1.36x   1.37x   1.21x   1.22x
1k      1.14x   1.14x   1.48x   1.49x   1.29x   1.31x
8k      1.14x   1.14x   1.50x   1.52x   1.32x   1.33x

AMD FX-8100:

size    old-vs-new      new-vs-3way     old-vs-3way
        enc     dec     enc     dec     enc     dec
256     1.10x   1.11x   1.01x   1.01x   0.92x   0.91x
1k      1.11x   1.12x   1.08x   1.07x   0.97x   0.96x
8k      1.11x   1.13x   1.10x   1.08x   0.99x   0.97x

[v2]
 - Do instruction interleaving another way to avoid adding new FPU<=>CPU
   register moves, as these cause a performance drop on Bulldozer (see the
   sketch after this list).
 - Further interleaving improvements for better out-of-order scheduling.
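
Not part of the patch: a minimal sketch, under the assumption that "FPU<=>CPU
register move" here means transfers like vmovq/vpextrq/vpinsrq between %xmm
and general-purpose registers (the demo function name is made up):

    /* domain_moves.S -- illustrative only */
    .text
    .globl  domain_move_demo
    domain_move_demo:
            /* SIMD -> GPR: moves like these are needed anyway to feed
             * the scalar S-box lookups, but they are comparatively
             * costly on Bulldozer.                                     */
            vmovq   %xmm0, %rax
            vpextrq $1, %xmm0, %rdx

            /* GPR -> SIMD: bringing the lookup results back.  The v2
             * interleaving is arranged so that no moves beyond the
             * existing ones are introduced.                            */
            vmovq   %rax, %xmm1
            vpinsrq $1, %rdx, %xmm1, %xmm1
            ret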

Tested-by: Borislav Petkov <bp@alien8.de>
Cc: Johannes Goetzfried <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Jussi Kivilinna authored and Herbert Xu committed Sep 6, 2012
1 parent 49d30d3 commit f94a73f
Showing 1 changed file with 142 additions and 85 deletions.
227 changes: 142 additions & 85 deletions arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -4,6 +4,8 @@
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
*
* Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -47,16 +49,22 @@
#define RC2 %xmm6
#define RD2 %xmm7

#define RX %xmm8
#define RY %xmm9
#define RX0 %xmm8
#define RY0 %xmm9

#define RX1 %xmm10
#define RY1 %xmm11

#define RK1 %xmm10
#define RK2 %xmm11
#define RK1 %xmm12
#define RK2 %xmm13

#define RID1 %rax
#define RID1b %al
#define RID2 %rbx
#define RID2b %bl
#define RT %xmm14
#define RR %xmm15

#define RID1 %rbp
#define RID1d %ebp
#define RID2 %rsi
#define RID2d %esi

#define RGI1 %rdx
#define RGI1bl %dl
@@ -65,6 +73,13 @@
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3 %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4 %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RGS1 %r8
#define RGS1d %r8d
#define RGS2 %r9
@@ -73,89 +88,123 @@
#define RGS3d %r10d


#define lookup_32bit(t0, t1, t2, t3, src, dst) \
movb src ## bl, RID1b; \
movb src ## bh, RID2b; \
movl t0(CTX, RID1, 4), dst ## d; \
xorl t1(CTX, RID2, 4), dst ## d; \
#define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \
movzbl src ## bl, RID1d; \
movzbl src ## bh, RID2d; \
shrq $16, src; \
movb src ## bl, RID1b; \
movb src ## bh, RID2b; \
movl t0(CTX, RID1, 4), dst ## d; \
movl t1(CTX, RID2, 4), RID2d; \
movzbl src ## bl, RID1d; \
xorl RID2d, dst ## d; \
movzbl src ## bh, RID2d; \
interleave_op(il_reg); \
xorl t2(CTX, RID1, 4), dst ## d; \
xorl t3(CTX, RID2, 4), dst ## d;

#define G(a, x, t0, t1, t2, t3) \
vmovq a, RGI1; \
vpsrldq $8, a, x; \
vmovq x, RGI2; \
#define dummy(d) /* do nothing */

#define shr_next(reg) \
shrq $16, reg;

#define G(gi1, gi2, x, t0, t1, t2, t3) \
lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1); \
lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2); \
\
lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none); \
shlq $32, RGS2; \
orq RGS1, RGS2; \
lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none); \
shlq $32, RGS1; \
orq RGS1, RGS3;

#define round_head_2(a, b, x1, y1, x2, y2) \
vmovq b ## 1, RGI3; \
vpextrq $1, b ## 1, RGI4; \
\
lookup_32bit(t0, t1, t2, t3, RGI1, RGS1); \
shrq $16, RGI1; \
lookup_32bit(t0, t1, t2, t3, RGI1, RGS2); \
shlq $32, RGS2; \
orq RGS1, RGS2; \
G(RGI1, RGI2, x1, s0, s1, s2, s3); \
vmovq a ## 2, RGI1; \
vpextrq $1, a ## 2, RGI2; \
vmovq RGS2, x1; \
vpinsrq $1, RGS3, x1, x1; \
\
lookup_32bit(t0, t1, t2, t3, RGI2, RGS1); \
shrq $16, RGI2; \
lookup_32bit(t0, t1, t2, t3, RGI2, RGS3); \
shlq $32, RGS3; \
orq RGS1, RGS3; \
G(RGI3, RGI4, y1, s1, s2, s3, s0); \
vmovq b ## 2, RGI3; \
vpextrq $1, b ## 2, RGI4; \
vmovq RGS2, y1; \
vpinsrq $1, RGS3, y1, y1; \
\
vmovq RGS2, x; \
vpinsrq $1, RGS3, x, x;
G(RGI1, RGI2, x2, s0, s1, s2, s3); \
vmovq RGS2, x2; \
vpinsrq $1, RGS3, x2, x2; \
\
G(RGI3, RGI4, y2, s1, s2, s3, s0); \
vmovq RGS2, y2; \
vpinsrq $1, RGS3, y2, y2;

#define encround(a, b, c, d, x, y) \
G(a, x, s0, s1, s2, s3); \
G(b, y, s1, s2, s3, s0); \
#define encround_tail(a, b, c, d, x, y, prerotate) \
vpaddd x, y, x; \
vpaddd x, RK1, RT;\
prerotate(b); \
vpxor RT, c, c; \
vpaddd y, x, y; \
vpaddd x, RK1, x; \
vpaddd y, RK2, y; \
vpxor x, c, c; \
vpsrld $1, c, x; \
vpsrld $1, c, RT; \
vpslld $(32 - 1), c, c; \
vpor c, x, c; \
vpslld $1, d, x; \
vpsrld $(32 - 1), d, d; \
vpor d, x, d; \
vpxor d, y, d;

#define decround(a, b, c, d, x, y) \
G(a, x, s0, s1, s2, s3); \
G(b, y, s1, s2, s3, s0); \
vpor c, RT, c; \
vpxor d, y, d; \

#define decround_tail(a, b, c, d, x, y, prerotate) \
vpaddd x, y, x; \
vpaddd x, RK1, RT;\
prerotate(a); \
vpxor RT, c, c; \
vpaddd y, x, y; \
vpaddd y, RK2, y; \
vpxor d, y, d; \
vpsrld $1, d, y; \
vpslld $(32 - 1), d, d; \
vpor d, y, d; \
vpslld $1, c, y; \
vpsrld $(32 - 1), c, c; \
vpor c, y, c; \
vpaddd x, RK1, x; \
vpxor x, c, c;

#define encrypt_round(n, a, b, c, d) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
encround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \
encround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY);

#define decrypt_round(n, a, b, c, d) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
decround(a ## 1, b ## 1, c ## 1, d ## 1, RX, RY); \
decround(a ## 2, b ## 2, c ## 2, d ## 2, RX, RY);

#define rotate_1l(x) \
vpslld $1, x, RR; \
vpsrld $(32 - 1), x, x; \
vpor x, RR, x;

#define preload_rgi(c) \
vmovq c, RGI1; \
vpextrq $1, c, RGI2;

#define encrypt_round(n, a, b, c, d, preload, prerotate) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
round_head_2(a, b, RX0, RY0, RX1, RY1); \
encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
preload(c ## 1); \
encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define decrypt_round(n, a, b, c, d, preload, prerotate) \
vbroadcastss (k+4*(2*(n)))(CTX), RK1; \
vbroadcastss (k+4*(2*(n)+1))(CTX), RK2; \
round_head_2(a, b, RX0, RY0, RX1, RY1); \
decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
preload(c ## 1); \
decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define encrypt_cycle(n) \
encrypt_round((2*n), RA, RB, RC, RD); \
encrypt_round(((2*n) + 1), RC, RD, RA, RB);
encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l);

#define encrypt_cycle_last(n) \
encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy);

#define decrypt_cycle(n) \
decrypt_round(((2*n) + 1), RC, RD, RA, RB); \
decrypt_round((2*n), RA, RB, RC, RD);
decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l);

#define decrypt_cycle_last(n) \
decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy);

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
vpunpckldq x1, x0, t0; \
@@ -216,17 +265,20 @@ __twofish_enc_blk_8way:
* %rcx: bool, if true: xor output
*/

pushq %rbp;
pushq %rbx;
pushq %rcx;

vmovdqu w(CTX), RK1;

leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2);
inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2);
inpack_blocks(%rdx, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
preload_rgi(RA1);
rotate_1l(RD1);
inpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
rotate_1l(RD2);

xorq RID1, RID1;
xorq RID2, RID2;
movq %rsi, %r11;

encrypt_cycle(0);
encrypt_cycle(1);
@@ -235,26 +287,27 @@ __twofish_enc_blk_8way:
encrypt_cycle(4);
encrypt_cycle(5);
encrypt_cycle(6);
encrypt_cycle(7);
encrypt_cycle_last(7);

vmovdqu (w+4*4)(CTX), RK1;

popq %rcx;
popq %rbx;
popq %rbp;

leaq (4*4*4)(%rsi), %rax;
leaq (4*4*4)(%r11), %rax;

testb %cl, %cl;
jnz __enc_xor8;

outunpack_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

ret;

__enc_xor8:
outunpack_xor_blocks(%rsi, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

ret;

@@ -269,16 +322,19 @@ twofish_dec_blk_8way:
* %rdx: src
*/

pushq %rbp;
pushq %rbx;

vmovdqu (w+4*4)(CTX), RK1;

leaq (4*4*4)(%rdx), %rax;
inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX, RY, RK2);
inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX, RY, RK2);
inpack_blocks(%rdx, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
preload_rgi(RC1);
rotate_1l(RA1);
inpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
rotate_1l(RA2);

xorq RID1, RID1;
xorq RID2, RID2;
movq %rsi, %r11;

decrypt_cycle(7);
decrypt_cycle(6);
@@ -287,14 +343,15 @@ twofish_dec_blk_8way:
decrypt_cycle(3);
decrypt_cycle(2);
decrypt_cycle(1);
decrypt_cycle(0);
decrypt_cycle_last(0);

vmovdqu (w)(CTX), RK1;

popq %rbx;
popq %rbp;

leaq (4*4*4)(%rsi), %rax;
outunpack_blocks(%rsi, RA1, RB1, RC1, RD1, RK1, RX, RY, RK2);
outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX, RY, RK2);
leaq (4*4*4)(%r11), %rax;
outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);

ret;
