x86/fpu: Remove irq_ts_save() and irq_ts_restore()
Now that lazy FPU is gone, we don't use CR0.TS (except possibly in
KVM guest mode).  Remove irq_ts_save(), irq_ts_restore(), and all of
their callers.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm list <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/70b9b9e7ba70659bedcb08aba63d0f9214f338f2.1477951965.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Andy Lutomirski authored and Ingo Molnar committed Nov 1, 2016
1 parent fc560a8 commit 5a83d60
Showing 5 changed files with 4 additions and 84 deletions.
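To illustrate what the removal means at call sites, here is a minimal sketch (not part of the commit itself): before this change, each PadLock instruction was bracketed by irq_ts_save()/irq_ts_restore(); with eager FPU the bracket simply disappears. do_padlock_insn() is a hypothetical stand-in for the xcrypt/xstore/xsha asm seen in the hunks below.

/*
 * Old call-site pattern (before this commit): CR0.TS could be set while
 * in atomic context, and a PadLock instruction would then raise a
 * spurious DNA (#NM) fault, so callers cleared and restored TS around it.
 */
static void padlock_do_something_old(void)
{
	int ts_state;

	ts_state = irq_ts_save();	/* clears CR0.TS if it was set */
	do_padlock_insn();		/* hypothetical wrapper for the xcrypt/xstore asm */
	irq_ts_restore(ts_state);	/* sets CR0.TS back if it had been set */
}

/*
 * New call-site pattern (after this commit): with lazy FPU gone the
 * kernel never sets CR0.TS, so the spurious fault cannot occur and the
 * save/restore bracket is dropped.
 */
static void padlock_do_something_new(void)
{
	do_padlock_insn();
}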
10 changes: 0 additions & 10 deletions arch/x86/include/asm/fpu/api.h
@@ -26,16 +26,6 @@ extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);

/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
* get used from interrupt context as well. To prevent these kernel instructions
* in interrupt context interacting wrongly with other user/kernel fpu usage, we
* should use them only in the context of irq_ts_save/restore()
*/
extern int irq_ts_save(void);
extern void irq_ts_restore(int TS_state);

/*
* Query the presence of one or more xfeatures. Works on any legacy CPU as well.
*
29 changes: 0 additions & 29 deletions arch/x86/kernel/fpu/core.c
@@ -137,35 +137,6 @@ void kernel_fpu_end(void)
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

/*
* CR0::TS save/restore functions:
*/
int irq_ts_save(void)
{
/*
* If in process context and not atomic, we can take a spurious DNA fault.
* Otherwise, doing clts() in process context requires disabling preemption
* or some heavy lifting like kernel_fpu_begin()
*/
if (!in_atomic())
return 0;

if (read_cr0() & X86_CR0_TS) {
clts();
return 1;
}

return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
if (TS_state)
stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);

/*
* Save the FPU state (mark it for reload if necessary):
*
8 changes: 2 additions & 6 deletions drivers/char/hw_random/via-rng.c
@@ -70,21 +70,17 @@ enum {
* until we have 4 bytes, thus returning a u32 at a time,
* instead of the current u8-at-a-time.
*
* Padlock instructions can generate a spurious DNA fault, so
* we have to call them in the context of irq_ts_save/restore()
* Padlock instructions can generate a spurious DNA fault, but the
* kernel doesn't use CR0.TS, so this doesn't matter.
*/

static inline u32 xstore(u32 *addr, u32 edx_in)
{
u32 eax_out;
int ts_state;

ts_state = irq_ts_save();

asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));

irq_ts_restore(ts_state);
return eax_out;
}

23 changes: 2 additions & 21 deletions drivers/crypto/padlock-aes.c
@@ -183,8 +183,8 @@ static inline void padlock_store_cword(struct cword *cword)

/*
* While the padlock instructions don't use FP/SSE registers, they
* generate a spurious DNA fault when cr0.ts is '1'. These instructions
* should be used only inside the irq_ts_save/restore() context
* generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
* the kernel doesn't use CR0.TS.
*/

static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -298,24 +298,18 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
int ts_state;

padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
int ts_state;

padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}

@@ -346,22 +340,19 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
int ts_state;

padlock_reset_key(&ctx->cword.encrypt);

blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);

ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
irq_ts_restore(ts_state);

padlock_store_cword(&ctx->cword.encrypt);

@@ -375,22 +366,19 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
int ts_state;

padlock_reset_key(&ctx->cword.decrypt);

blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);

ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
irq_ts_restore(ts_state);

padlock_store_cword(&ctx->cword.encrypt);

@@ -425,14 +413,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
int ts_state;

padlock_reset_key(&ctx->cword.encrypt);

blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);

ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
@@ -442,7 +428,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
irq_ts_restore(ts_state);

padlock_store_cword(&ctx->cword.decrypt);

@@ -456,14 +441,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
int ts_state;

padlock_reset_key(&ctx->cword.encrypt);

blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);

ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
Expand All @@ -472,8 +455,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_done(desc, &walk, nbytes);
}

irq_ts_restore(ts_state);

padlock_store_cword(&ctx->cword.encrypt);

return err;
18 changes: 0 additions & 18 deletions drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
struct sha1_state state;
unsigned int space;
unsigned int leftover;
int ts_state;
int err;

dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,

memcpy(result, &state.state, SHA1_DIGEST_SIZE);

/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
irq_ts_restore(ts_state);

padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
struct sha256_state state;
unsigned int space;
unsigned int leftover;
int ts_state;
int err;

dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,

memcpy(result, &state.state, SHA256_DIGEST_SIZE);

/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
irq_ts_restore(ts_state);

padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
int ts_state;

partial = sctx->count & 0x3f;
sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
memcpy(sctx->buffer + partial, data,
done + SHA1_BLOCK_SIZE);
src = sctx->buffer;
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst) \
: "a"((long)-1), "c"((unsigned long)1));
irq_ts_restore(ts_state);
done += SHA1_BLOCK_SIZE;
src = data + done;
}

/* Process the left bytes from the input data */
if (len - done >= SHA1_BLOCK_SIZE) {
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
src = data + done;
}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
int ts_state;

partial = sctx->count & 0x3f;
sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
memcpy(sctx->buf + partial, data,
done + SHA256_BLOCK_SIZE);
src = sctx->buf;
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1), "c"((unsigned long)1));
irq_ts_restore(ts_state);
done += SHA256_BLOCK_SIZE;
src = data + done;
}

/* Process the left bytes from input data*/
if (len - done >= SHA256_BLOCK_SIZE) {
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / 64)));
irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % 64);
src = data + done;
}
