powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()
The enable_kernel_*() functions leave the relevant MSR bits enabled
until we exit the kernel sometime later. Create disable versions
that wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't want to disable them normally for performance reasons
(MSR writes are slow), the disable variants will back a debug boot
option that does the disabling eagerly and catches bad uses in other
areas of the kernel.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Anton Blanchard authored and Michael Ellerman committed Dec 1, 2015
1 parent a0e72cf commit dc4fbba
Showing 15 changed files with 39 additions and 0 deletions.
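
Every hunk below follows the same shape: the kernel user already brackets its FP/vector work with preempt_disable()/preempt_enable() (plus pagefault_disable()/pagefault_enable() where faults must stay out), and the new disable_kernel_*() call lands at the start of the unwind. A minimal sketch of the resulting convention — the wrapper function here is illustrative, not taken from the commit:

	#include <linux/preempt.h>
	#include <asm/switch_to.h>

	/* Hypothetical example of the bracketing this commit standardises. */
	static void example_kernel_fp_section(void)
	{
		preempt_disable();
		enable_kernel_fp();	/* MSR_FP on: FP instructions are now legal */

		/* ... kernel FP/vector work goes here ... */

		disable_kernel_fp();	/* no-op today; the planned debug option
					 * can clear MSR_FP to trap stray FP use */
		preempt_enable();
	}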
1 change: 1 addition & 0 deletions arch/powerpc/crypto/aes-spe-glue.c
@@ -85,6 +85,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+	disable_kernel_spe();
 	/* reenable preemption */
 	preempt_enable();
 }
1 change: 1 addition & 0 deletions arch/powerpc/crypto/sha1-spe-glue.c
@@ -46,6 +46,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+	disable_kernel_spe();
 	/* reenable preemption */
 	preempt_enable();
 }
1 change: 1 addition & 0 deletions arch/powerpc/crypto/sha256-spe-glue.c
@@ -47,6 +47,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+	disable_kernel_spe();
 	/* reenable preemption */
 	preempt_enable();
 }
5 changes: 5 additions & 0 deletions arch/powerpc/include/asm/switch_to.h
@@ -26,6 +26,11 @@ extern void enable_kernel_spe(void);
 extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
+static inline void disable_kernel_fp(void) { }
+static inline void disable_kernel_altivec(void) { }
+static inline void disable_kernel_spe(void) { }
+static inline void disable_kernel_vsx(void) { }
+
 #ifdef CONFIG_PPC_FPU
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
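
For now these are empty inline stubs, so the normal build pays nothing at the call sites. As a rough sketch of what the planned debug boot option could substitute here — an assumption, not part of this commit — a debug build might write the MSR to drop the bit so any unbracketed use faults immediately:

	/* Hypothetical debug-only variant (not this commit's code): clear
	 * MSR_FP so FP use outside an enable/disable pair faults at once.
	 */
	static inline void disable_kernel_fp(void)
	{
		mtmsr(mfmsr() & ~MSR_FP);	/* slow MSR write: exactly why
						 * the normal build keeps a no-op */
	}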
2 changes: 2 additions & 0 deletions arch/powerpc/kernel/align.c
@@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs)
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_df(&data.dd, (float *)&data.x32.low32);
+		disable_kernel_fp();
 		preempt_enable();
 #else
 		return 0;
@@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs)
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_fd((float *)&data.x32.low32, &data.dd);
+		disable_kernel_fp();
 		preempt_enable();
 #else
 		return 0;
1 change: 1 addition & 0 deletions arch/powerpc/kvm/book3s_paired_singles.c
@@ -1265,6 +1265,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	if (rcomp)
 		kvmppc_set_cr(vcpu, cr);
 
+	disable_kernel_fp();
 	preempt_enable();
 
 	return emulated;
4 changes: 4 additions & 0 deletions arch/powerpc/kvm/book3s_pr.c
@@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 		preempt_disable();
 		enable_kernel_fp();
 		load_fp_state(&vcpu->arch.fp);
+		disable_kernel_fp();
 		t->fp_save_area = &vcpu->arch.fp;
 		preempt_enable();
 	}
@@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 		preempt_disable();
 		enable_kernel_altivec();
 		load_vr_state(&vcpu->arch.vr);
+		disable_kernel_altivec();
 		t->vr_save_area = &vcpu->arch.vr;
 		preempt_enable();
 #endif
@@ -788,13 +790,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 		preempt_disable();
 		enable_kernel_fp();
 		load_fp_state(&vcpu->arch.fp);
+		disable_kernel_fp();
 		preempt_enable();
 	}
 #ifdef CONFIG_ALTIVEC
 	if (lost_ext & MSR_VEC) {
 		preempt_disable();
 		enable_kernel_altivec();
 		load_vr_state(&vcpu->arch.vr);
+		disable_kernel_altivec();
 		preempt_enable();
 	}
 #endif
4 changes: 4 additions & 0 deletions arch/powerpc/kvm/booke.c
@@ -98,6 +98,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 	preempt_disable();
 	enable_kernel_spe();
 	kvmppc_save_guest_spe(vcpu);
+	disable_kernel_spe();
 	vcpu->arch.shadow_msr &= ~MSR_SPE;
 	preempt_enable();
 }
@@ -107,6 +108,7 @@ static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
 	preempt_disable();
 	enable_kernel_spe();
 	kvmppc_load_guest_spe(vcpu);
+	disable_kernel_spe();
 	vcpu->arch.shadow_msr |= MSR_SPE;
 	preempt_enable();
 }
@@ -141,6 +143,7 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
 	if (!(current->thread.regs->msr & MSR_FP)) {
 		enable_kernel_fp();
 		load_fp_state(&vcpu->arch.fp);
+		disable_kernel_fp();
 		current->thread.fp_save_area = &vcpu->arch.fp;
 		current->thread.regs->msr |= MSR_FP;
 	}
@@ -182,6 +185,7 @@ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
 	if (!(current->thread.regs->msr & MSR_VEC)) {
 		enable_kernel_altivec();
 		load_vr_state(&vcpu->arch.vr);
+		disable_kernel_altivec();
 		current->thread.vr_save_area = &vcpu->arch.vr;
 		current->thread.regs->msr |= MSR_VEC;
 	}
2 changes: 2 additions & 0 deletions arch/powerpc/lib/vmx-helper.c
@@ -46,6 +46,7 @@ int enter_vmx_usercopy(void)
  */
 int exit_vmx_usercopy(void)
 {
+	disable_kernel_altivec();
 	pagefault_enable();
 	preempt_enable();
 	return 0;
@@ -70,6 +71,7 @@ int enter_vmx_copy(void)
  */
 void *exit_vmx_copy(void *dest)
 {
+	disable_kernel_altivec();
 	preempt_enable();
 	return dest;
 }
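
These enter/exit helpers are the template the VMX-accelerated copy routines use: enter once, run the Altivec loop, exit once, and with this commit the exit path also drops kernel Altivec. A hedged sketch of a caller — only enter_vmx_usercopy() and exit_vmx_usercopy() are real; the other names are illustrative:

	/* Illustrative caller; the real users are the assembly copy loops. */
	static int example_copy_with_vmx(void __user *to, const void *from,
					 size_t n)
	{
		if (!enter_vmx_usercopy())	/* returns 0 if VMX is unusable */
			return example_copy_fallback(to, from, n); /* hypothetical */

		/* ... Altivec-accelerated copy of n bytes ... */

		return exit_vmx_usercopy();	/* now also disables Altivec */
	}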
4 changes: 4 additions & 0 deletions arch/powerpc/lib/xor_vmx.c
@@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		v2 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_2);
@@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
 		v3 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_3);
@@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
 		v4 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_4);
@@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
 		v5 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_5);
3 changes: 3 additions & 0 deletions drivers/crypto/vmx/aes.c
@@ -86,6 +86,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 
@@ -104,6 +105,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	pagefault_disable();
 	enable_kernel_vsx();
 	aes_p8_encrypt(src, dst, &ctx->enc_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 }
@@ -120,6 +122,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	pagefault_disable();
 	enable_kernel_vsx();
 	aes_p8_decrypt(src, dst, &ctx->dec_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 }
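
Throughout the vmx crypto glue the new call is the first step of the teardown, so the unwind mirrors the setup in reverse. A condensed sketch of the ordering these files use — the wrapper name is illustrative, while aes_p8_encrypt() and struct p8_aes_ctx come from this driver:

	static void example_p8_op(struct p8_aes_ctx *ctx, u8 *dst, const u8 *src)
	{
		preempt_disable();	/* keep the VSX state on this CPU */
		pagefault_disable();	/* no fault handler mid-operation */
		enable_kernel_vsx();	/* MSR_VSX on for the P8 asm routines */

		aes_p8_encrypt(src, dst, &ctx->enc_key);

		disable_kernel_vsx();	/* new in this commit */
		pagefault_enable();	/* unwind in exact reverse order */
		preempt_enable();
	}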
3 changes: 3 additions & 0 deletions drivers/crypto/vmx/aes_cbc.c
@@ -87,6 +87,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 
@@ -127,6 +128,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
 
+		disable_kernel_vsx();
 		pagefault_enable();
 		preempt_enable();
 	}
@@ -167,6 +169,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
 
+		disable_kernel_vsx();
 		pagefault_enable();
 		preempt_enable();
 	}
3 changes: 3 additions & 0 deletions drivers/crypto/vmx/aes_ctr.c
@@ -83,6 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_disable();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 
 	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
@@ -101,6 +102,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
 	pagefault_disable();
 	enable_kernel_vsx();
 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+	disable_kernel_vsx();
 	pagefault_enable();
 
 	crypto_xor(keystream, src, nbytes);
@@ -139,6 +141,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 						    AES_BLOCK_SIZE,
 						    &ctx->enc_key,
 						    walk.iv);
+			disable_kernel_vsx();
 			pagefault_enable();
 
 			/* We need to update IV mostly for last bytes/round */
4 changes: 4 additions & 0 deletions drivers/crypto/vmx/ghash.c
@@ -120,6 +120,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 	pagefault_disable();
 	enable_kernel_vsx();
 	gcm_init_p8(ctx->htable, (const u64 *) key);
+	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 	return crypto_shash_setkey(ctx->fallback, key, keylen);
@@ -150,6 +151,7 @@ static int p8_ghash_update(struct shash_desc *desc,
 			enable_kernel_vsx();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
+			disable_kernel_vsx();
 			pagefault_enable();
 			preempt_enable();
 			src += GHASH_DIGEST_SIZE - dctx->bytes;
@@ -162,6 +164,7 @@ static int p8_ghash_update(struct shash_desc *desc,
 			pagefault_disable();
 			enable_kernel_vsx();
 			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+			disable_kernel_vsx();
 			pagefault_enable();
 			preempt_enable();
 			src += len;
@@ -192,6 +195,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
 			enable_kernel_vsx();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
+			disable_kernel_vsx();
 			pagefault_enable();
 			preempt_enable();
 			dctx->bytes = 0;
1 change: 1 addition & 0 deletions lib/raid6/altivec.uc
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
 
 	raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 
