[PATCH] powerpc: Fix handling of fpscr on 64-bit
The recent merge of fpu.S broke the handling of fpscr for
ARCH=powerpc and CONFIG_PPC64=y.  FP registers could be corrupted,
leading to strange random application crashes.

The confusion arises because the thread_struct has (and requires) a
64-bit area to save the fpscr, since we use load/store double
instructions to get it in to/out of the FPU.  However, only the low
32 bits are actually used, so we want to treat it as a 32-bit quantity
when manipulating its bits, to avoid extra load/stores on 32-bit.  This
patch replaces the current definition with a structure of two 32-bit
quantities (pad and val) to clarify things as much as possible.
The 'val' field is used when manipulating bits; the structure itself
is used when obtaining the address for loading/storing the value
to/from the FPU.
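
As an illustration, the new representation looks roughly like this (a
sketch only, with an illustrative type name; the actual definition
lives in the processor.h headers, which are not among the files shown
below):

	/* Sketch of the fpscr layout described above.  The name
	 * fpscr_t is illustrative; the real header change is not
	 * visible in this diff.  A 64-bit stfd/lfd of the FPSCR
	 * image keeps the meaningful low 32 bits in the second
	 * word on big-endian, so 'pad' comes first. */
	typedef struct {
		unsigned int pad;	/* high word, unused */
		unsigned int val;	/* low word: the FPSCR bits */
	} fpscr_t;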

While we're at it, consolidate the 4 (!) almost identical versions of
cvt_fd() and cvt_df() (arch/ppc/kernel/misc.S,
arch/ppc64/kernel/misc.S, arch/powerpc/kernel/misc_32.S,
arch/powerpc/kernel/misc_64.S) into a single version in fpu.S.  The
new version takes a pointer to the thread_struct and applies the
correct offset itself, rather than taking a pointer to the fpscr
field, again to avoid confusion as to which is the correct field to
use.
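
Based on the call sites updated below (arch/ppc/kernel/align.c and
arch/ppc64/kernel/align.c), the consolidated helpers have C
prototypes along these lines (a sketch inferred from the diff, not
quoted from a header):

	/* Prototypes as inferred from the align.c call sites below;
	 * the asm in fpu.S applies the THREAD_FPSCR offset itself. */
	extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
	extern void cvt_df(double *from, float *to, struct thread_struct *thread);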

Finally, this patch makes ARCH=ppc64 also use the consolidated fpu.S
code, which it previously did not.

Built for G5 (ARCH=ppc64 and ARCH=powerpc), 32-bit powermac (ARCH=ppc
and ARCH=powerpc) and Walnut (ARCH=ppc, CONFIG_MATH_EMULATION=y).
Booted on G5 (ARCH=powerpc) and things which previously fell over no
longer do.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
David Gibson authored and Paul Mackerras committed Oct 27, 2005
1 parent fda262b commit 25c8a78
Showing 22 changed files with 59 additions and 209 deletions.
5 changes: 2 additions & 3 deletions arch/powerpc/kernel/Makefile
@@ -29,7 +29,6 @@ extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_PPC64) += entry_64.o
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-y += vmlinux.lds

obj-y += process.o init_task.o time.o \
@@ -49,7 +48,7 @@ else
# stuff used from here for ARCH=ppc or ARCH=ppc64
obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o

fpux-$(CONFIG_PPC32) += fpu.o
extra-$(CONFIG_PPC_FPU) += $(fpux-y)

endif

extra-$(CONFIG_PPC_FPU) += fpu.o
31 changes: 28 additions & 3 deletions arch/powerpc/kernel/fpu.S
@@ -48,7 +48,7 @@ _GLOBAL(load_up_fpu)
addi r4,r4,THREAD /* want last_task_used_math->thread */
SAVE_32FPRS(0, r4)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r4)
stfd fr0,THREAD_FPSCR(r4)
LDL r5,PT_REGS(r4)
tophys(r5,r5)
LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -71,7 +71,7 @@ _GLOBAL(load_up_fpu)
or r12,r12,r4
std r12,_MSR(r1)
#endif
lfd fr0,THREAD_FPSCR-4(r5)
lfd fr0,THREAD_FPSCR(r5)
mtfsf 0xff,fr0
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
@@ -104,7 +104,7 @@ _GLOBAL(giveup_fpu)
CMPI 0,r5,0
SAVE_32FPRS(0, r3)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r3)
stfd fr0,THREAD_FPSCR(r3)
beq 1f
LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
@@ -117,3 +117,28 @@ _GLOBAL(giveup_fpu)
STL r5,OFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
blr

/*
* These are used in the alignment trap handler when emulating
* single-precision loads and stores.
* We restore and save the fpscr so the task gets the same result
* and exceptions as if the cpu had performed the load or store.
*/

_GLOBAL(cvt_fd)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
mtfsf 0xff,0
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0
stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
blr

_GLOBAL(cvt_df)
lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
mtfsf 0xff,0
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0
stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
blr
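
In C terms, the cvt_fd routine just added behaves roughly as follows
(an illustrative sketch, not kernel code; load_fpscr() and
save_fpscr() are hypothetical stand-ins for the lfd/mtfsf and
mffs/stfd pairs above):

	/* Illustrative C equivalent of the cvt_fd asm above.
	 * load_fpscr() and save_fpscr() are hypothetical helpers
	 * standing in for the lfd/mtfsf and mffs/stfd pairs. */
	void cvt_fd(float *from, double *to, struct thread_struct *thread)
	{
		load_fpscr(&thread->fpscr);	/* restore task's FP status/control */
		*to = *from;			/* single -> double conversion */
		save_fpscr(&thread->fpscr);	/* capture any new exception bits */
	}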
27 changes: 0 additions & 27 deletions arch/powerpc/kernel/misc_32.S
@@ -992,33 +992,6 @@ _GLOBAL(_get_SP)
mr r3,r1 /* Close enough */
blr

/*
* These are used in the alignment trap handler when emulating
* single-precision loads and stores.
* We restore and save the fpscr so the task gets the same result
* and exceptions as if the cpu had performed the load or store.
*/

#ifdef CONFIG_PPC_FPU
_GLOBAL(cvt_fd)
lfd 0,-4(r5) /* load up fpscr value */
mtfsf 0xff,0
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0 /* save new fpscr value */
stfd 0,-4(r5)
blr

_GLOBAL(cvt_df)
lfd 0,-4(r5) /* load up fpscr value */
mtfsf 0xff,0
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0 /* save new fpscr value */
stfd 0,-4(r5)
blr
#endif

/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
19 changes: 0 additions & 19 deletions arch/powerpc/kernel/misc_64.S
@@ -462,25 +462,6 @@ _GLOBAL(_outsl_ns)
sync
blr


_GLOBAL(cvt_fd)
lfd 0,0(r5) /* load up fpscr value */
mtfsf 0xff,0
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0 /* save new fpscr value */
stfd 0,0(r5)
blr

_GLOBAL(cvt_df)
lfd 0,0(r5) /* load up fpscr value */
mtfsf 0xff,0
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0 /* save new fpscr value */
stfd 0,0(r5)
blr

/*
* identify_cpu and calls setup_cpu
* In: r3 = base of the cpu_specs array
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/process.c
@@ -665,7 +665,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#endif
#endif /* CONFIG_SMP */
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
memset(current->thread.vr, 0, sizeof(current->thread.vr));
memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/signal_32.c
@@ -403,7 +403,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
ELF_NFPREG * sizeof(double)))
return 1;

current->thread.fpscr = 0; /* turn off all fp exceptions */
current->thread.fpscr.val = 0; /* turn off all fp exceptions */

#ifdef CONFIG_ALTIVEC
/* save altivec registers */
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/traps.c
@@ -549,7 +549,7 @@ static void parse_fpe(struct pt_regs *regs)

flush_fp_to_thread(current);

fpscr = current->thread.fpscr;
fpscr = current->thread.fpscr.val;

/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
4 changes: 2 additions & 2 deletions arch/ppc/kernel/align.c
@@ -375,7 +375,7 @@ fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_fd(&data.f, &data.d, &current->thread.fpscr);
cvt_fd(&data.f, &data.d, &current->thread);
preempt_enable();
#else
return 0;
@@ -385,7 +385,7 @@ fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_df(&data.d, &data.f, &current->thread.fpscr);
cvt_df(&data.d, &data.f, &current->thread);
preempt_enable();
#else
return 0;
27 changes: 0 additions & 27 deletions arch/ppc/kernel/misc.S
@@ -967,33 +967,6 @@ _GLOBAL(_get_SP)
mr r3,r1 /* Close enough */
blr

/*
* These are used in the alignment trap handler when emulating
* single-precision loads and stores.
* We restore and save the fpscr so the task gets the same result
* and exceptions as if the cpu had performed the load or store.
*/

#ifdef CONFIG_PPC_FPU
_GLOBAL(cvt_fd)
lfd 0,-4(r5) /* load up fpscr value */
mtfsf 0xff,0
lfs 0,0(r3)
stfd 0,0(r4)
mffs 0 /* save new fpscr value */
stfd 0,-4(r5)
blr

_GLOBAL(cvt_df)
lfd 0,-4(r5) /* load up fpscr value */
mtfsf 0xff,0
lfd 0,0(r3)
stfs 0,0(r4)
mffs 0 /* save new fpscr value */
stfd 0,-4(r5)
blr
#endif

/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
2 changes: 1 addition & 1 deletion arch/ppc/kernel/process.c
@@ -542,7 +542,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
last_task_used_spe = NULL;
#endif
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr = 0;
current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
memset(current->thread.vr, 0, sizeof(current->thread.vr));
memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
2 changes: 1 addition & 1 deletion arch/ppc/kernel/traps.c
@@ -659,7 +659,7 @@ void program_check_exception(struct pt_regs *regs)
giveup_fpu(current);
preempt_enable();

fpscr = current->thread.fpscr;
fpscr = current->thread.fpscr.val;
fpscr &= fpscr << 22; /* mask summary bits with enables */
if (fpscr & FPSCR_VX)
code = FPE_FLTINV;
2 changes: 1 addition & 1 deletion arch/ppc/math-emu/sfp-machine.h
@@ -166,7 +166,7 @@ extern int fp_pack_ds(void *, long, unsigned long, unsigned long, long, long);
#include <linux/kernel.h>
#include <linux/sched.h>

#define __FPU_FPSCR (current->thread.fpscr)
#define __FPU_FPSCR (current->thread.fpscr.val)

/* We only actually write to the destination register
* if exceptions signalled (if any) will not trap.
3 changes: 3 additions & 0 deletions arch/ppc64/Kconfig
@@ -197,6 +197,9 @@ config BOOTX_TEXT
config POWER4
def_bool y

config PPC_FPU
def_bool y

config POWER4_ONLY
bool "Optimize for POWER4"
default n
1 change: 1 addition & 0 deletions arch/ppc64/Makefile
@@ -80,6 +80,7 @@ endif
CFLAGS += $(call cc-option,-funit-at-a-time)

head-y := arch/ppc64/kernel/head.o
head-y += arch/powerpc/kernel/fpu.o

libs-y += arch/ppc64/lib/
core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
4 changes: 2 additions & 2 deletions arch/ppc64/kernel/align.c
@@ -313,7 +313,7 @@ fix_alignment(struct pt_regs *regs)
/* Doing stfs, have to convert to single */
preempt_disable();
enable_kernel_fp();
cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr);
cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
disable_kernel_fp();
preempt_enable();
}
@@ -349,7 +349,7 @@ fix_alignment(struct pt_regs *regs)
/* Doing lfs, have to convert to double */
preempt_disable();
enable_kernel_fp();
cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr);
cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
disable_kernel_fp();
preempt_enable();
}
59 changes: 2 additions & 57 deletions arch/ppc64/kernel/head.S
@@ -81,7 +81,7 @@ _stext:
_GLOBAL(__start)
/* NOP this out unconditionally */
BEGIN_FTR_SECTION
b .__start_initialization_multiplatform
b .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

@@ -747,6 +747,7 @@
* any task or sent any task a signal, you should use
* ret_from_except or ret_from_except_lite instead of this.
*/
.globl fast_exception_return
fast_exception_return:
ld r12,_MSR(r1)
ld r11,_NIP(r1)
@@ -858,62 +859,6 @@ fp_unavailable_common:
bl .kernel_fp_unavailable_exception
BUG_OPCODE

/*
* load_up_fpu(unused, unused, tsk)
* Disable FP for the task which had the FPU previously,
* and save its floating-point registers in its thread_struct.
* Enables the FPU for use in the kernel on return.
* On SMP we know the fpu is free, since we give it up every
* switch (ie, no lazy save of the FP registers).
* On entry: r13 == 'current' && last_task_used_math != 'current'
*/
_STATIC(load_up_fpu)
mfmsr r5 /* grab the current MSR */
ori r5,r5,MSR_FP
mtmsrd r5 /* enable use of fpu now */
isync
/*
* For SMP, we don't do lazy FPU switching because it just gets too
* horrendously complex, especially when a task switches from one CPU
* to another. Instead we call giveup_fpu in switch_to.
*
*/
#ifndef CONFIG_SMP
ld r3,last_task_used_math@got(r2)
ld r4,0(r3)
cmpdi 0,r4,0
beq 1f
/* Save FP state to last_task_used_math's THREAD struct */
addi r4,r4,THREAD
SAVE_32FPRS(0, r4)
mffs fr0
stfd fr0,THREAD_FPSCR(r4)
/* Disable FP for last_task_used_math */
ld r5,PT_REGS(r4)
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r6,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r6
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
ld r4,PACACURRENT(r13)
addi r5,r4,THREAD /* Get THREAD */
ld r4,THREAD_FPEXC_MODE(r5)
ori r12,r12,MSR_FP
or r12,r12,r4
std r12,_MSR(r1)
lfd fr0,THREAD_FPSCR(r5)
mtfsf 0xff,fr0
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
/* Update last_task_used_math to 'current' */
subi r4,r5,THREAD /* Back to 'current' */
std r4,0(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
b fast_exception_return

.align 7
.globl altivec_unavailable_common
altivec_unavailable_common: