powerpc: Define 32/64 bit asm macros and use them in fpu.S
These macros help in writing assembly code that works for both ppc32
and ppc64.  With this we now have a common fpu.S.  This takes out
load_up_fpu from head_64.S.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Paul Mackerras committed Oct 6, 2005
1 parent 6ce52e6 commit b85a046
Showing 3 changed files with 53 additions and 105 deletions.
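
The effect in one picture: code written against the new macros assembles to the
natural instruction for each word size, so a single assembler source serves both
kernels. A minimal usage sketch (not part of the patch; some_long_var is a
hypothetical symbol):

	LOADBASE(r3, some_long_var)		/* r3 = base for addressing the symbol */
	LDL	r4,OFF(some_long_var)(r3)	/* load a long: lwz on ppc32, ld on ppc64 */
	CMPI	0,r4,0				/* compare a long: cmpwi or cmpdi */
	STL	r4,OFF(some_long_var)(r3)	/* store a long: stw or std */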
arch/powerpc/kernel/fpu.S (72 changes: 29 additions & 43 deletions)
@@ -27,13 +27,9 @@
* Load up this task's FP registers from its thread_struct,
* enable the FPU for the current task and return to the task.
*/
-	.globl	load_up_fpu
-load_up_fpu:
+_GLOBAL(load_up_fpu)
mfmsr r5
ori r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
-	clrldi	r5,r5,1		/* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
SYNC
MTMSRD(r5) /* enable use of fpu now */
isync
@@ -43,91 +39,81 @@ load_up_fpu:
* to another. Instead we call giveup_fpu in switch_to.
*/
#ifndef CONFIG_SMP
-	tophys(r6,0)			/* get __pa constant */
-	addis	r3,r6,last_task_used_math@ha
-	lwz	r4,last_task_used_math@l(r3)
-	cmpwi	0,r4,0
+	LOADBASE(r3, last_task_used_math)
+	tophys(r3,r3)
+	LDL	r4,OFF(last_task_used_math)(r3)
+	CMPI	0,r4,0
beq 1f
-	add	r4,r4,r6
+	tophys(r4,r4)
addi r4,r4,THREAD /* want last_task_used_math->thread */
SAVE_32FPRS(0, r4)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r4)
-	lwz	r5,PT_REGS(r4)
-	add	r5,r5,r6
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	LDL	r5,PT_REGS(r4)
+	tophys(r5,r5)
+	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r10,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r10 /* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
+#ifdef CONFIG_PPC32
mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
+#else
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	ld	r4,THREAD_FPEXC_MODE(r5)
+	ori	r12,r12,MSR_FP
+	or	r12,r12,r4
+	std	r12,_MSR(r1)
+#endif
lfd fr0,THREAD_FPSCR-4(r5)
mtfsf 0xff,fr0
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
-	sub	r4,r4,r6
-	stw	r4,last_task_used_math@l(r3)
+	tovirt(r4,r4)
+	STL	r4,OFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
b fast_exception_return

-/*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-	.globl	KernelFP
-KernelFP:
-	lwz	r3,_MSR(r1)
-	ori	r3,r3,MSR_FP
-	stw	r3,_MSR(r1)		/* enable use of FP after return */
-	lis	r3,86f@h
-	ori	r3,r3,86f@l
-	mr	r4,r2			/* current */
-	lwz	r5,_NIP(r1)
-	bl	printk
-	b	ret_from_except
-86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
-	.align	4,0
-
/*
* giveup_fpu(tsk)
* Disable FP for the task given as the argument,
* and save the floating-point registers in its thread_struct.
* Enables the FPU for use in the kernel on return.
*/
-	.globl	giveup_fpu
-giveup_fpu:
+_GLOBAL(giveup_fpu)
mfmsr r5
ori r5,r5,MSR_FP
SYNC_601
ISYNC_601
MTMSRD(r5) /* enable use of fpu now */
SYNC_601
isync
-	cmpwi	0,r3,0
+	CMPI	0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
-	lwz	r5,PT_REGS(r3)
-	cmpwi	0,r5,0
+	LDL	r5,PT_REGS(r3)
+	CMPI	0,r5,0
SAVE_32FPRS(0, r3)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r3)
beq 1f
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	LDL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r3 /* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
-	lis	r4,last_task_used_math@ha
-	stw	r5,last_task_used_math@l(r4)
+	LOADBASE(r4,last_task_used_math)
+	STL	r5,OFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
blr
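
For orientation: on ppc32, the new non-SMP prologue above is a hand-expansion
away from the code it replaces. Using the ppc_asm.h definitions at the end of
this commit (an approximate expansion, with the platform-specific tophys()
left unexpanded):

	lis	r3,last_task_used_math@ha	/* LOADBASE(r3, last_task_used_math) */
	tophys(r3,r3)
	lwz	r4,last_task_used_math@l(r3)	/* LDL r4,OFF(last_task_used_math)(r3) */
	cmpwi	0,r4,0				/* CMPI 0,r4,0 */

i.e. essentially the removed sequence, with tophys()/tovirt() now hiding the
old r6-based physical-address arithmetic.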
arch/powerpc/kernel/head_64.S (58 changes: 1 addition & 57 deletions)
@@ -80,7 +80,7 @@ _stext:
_GLOBAL(__start)
/* NOP this out unconditionally */
BEGIN_FTR_SECTION
-	b .__start_initialization_multiplatform
+	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

@@ -857,62 +857,6 @@ fp_unavailable_common:
bl .kernel_fp_unavailable_exception
BUG_OPCODE

-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr	r5			/* grab the current MSR */
-	ori	r5,r5,MSR_FP
-	mtmsrd	r5			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld	r3,last_task_used_math@got(r2)
-	ld	r4,0(r3)
-	cmpdi	0,r4,0
-	beq	1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi	r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld	r5,PT_REGS(r4)
-	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r6
-	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld	r4,PACACURRENT(r13)
-	addi	r5,r4,THREAD		/* Get THREAD */
-	ld	r4,THREAD_FPEXC_MODE(r5)
-	ori	r12,r12,MSR_FP
-	or	r12,r12,r4
-	std	r12,_MSR(r1)
-	lfd	fr0,THREAD_FPSCR(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi	r4,r5,THREAD		/* Back to 'current' */
-	std	r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b	fast_exception_return
-
.align 7
.globl altivec_unavailable_common
altivec_unavailable_common:
include/asm-powerpc/ppc_asm.h (28 changes: 23 additions & 5 deletions)
@@ -103,12 +103,13 @@
oris rn,rn,name##@h; \
ori rn,rn,name##@l

-#define LOADBASE(rn,name)	\
-	lis	rn,name@highest;	\
-	ori	rn,rn,name@higher;	\
-	rldicr	rn,rn,32,31;	\
-	oris	rn,rn,name@ha
+#define LOADBASE(rn,name)	\
+	.section	.toc,"aw";	\
+1:	.tc	name[TC],name;	\
+	.previous;		\
+	ld	rn,1b@toc(r2)

+#define OFF(name)	0

#define SET_REG_TO_CONST(reg, value) \
lis reg,(((value)>>48)&0xFFFF); \
@@ -123,6 +124,23 @@
rldicr reg,reg,32,31; \
oris reg,reg,(label)@h; \
ori reg,reg,(label)@l;

+/* operations for longs and pointers */
+#define LDL	ld
+#define STL	std
+#define CMPI	cmpdi
+
+#else	/* 32-bit */
+#define LOADBASE(rn,name)	\
+	lis	rn,name@ha
+
+#define OFF(name)	name@l
+
+/* operations for longs and pointers */
+#define LDL	lwz
+#define STL	stw
+#define CMPI	cmpwi
+
#endif

/* various errata or part fixups */
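A note on the 64-bit LOADBASE rewrite above: the old macro materialized the
symbol's 64-bit address with a four-instruction immediate sequence, while the
new one plants the address in the TOC at link time and fetches it with a single
load through r2, which the 64-bit kernel keeps pointing at the TOC. A
hand-written comparison (illustrative, not from the patch):

	/* before: build the address piecewise; users still needed name@l */
	lis	r3,name@highest
	ori	r3,r3,name@higher
	rldicr	r3,r3,32,31
	oris	r3,r3,name@ha
	ld	r4,name@l(r3)

	/* after: one load from a linker-filled TOC slot; offset is just 0 */
	ld	r3,<toc entry for name>@toc(r2)
	ld	r4,0(r3)

This is also why OFF(name) is 0 on 64-bit but name@l on 32-bit: the TOC load
yields the complete address, whereas the 32-bit lis supplies only the high half.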
