Commit 060bee9
Michael Neuling authored and Paul Mackerras committed Jul 1, 2008
1 parent 872b3af commit 060bee9
Showing 17 changed files with 452 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 72ffff5b1792b0fa4d40a8e2f3276fff999820ec
+refs/heads/master: ce48b2100785e5ca629fb3aa8e3b50aca808f692
5 changes: 5 additions & 0 deletions trunk/arch/powerpc/kernel/entry_64.S
@@ -353,6 +353,11 @@ _GLOBAL(_switch)
mflr r20 /* Return to switch caller */
mfmsr r22
li r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
oris r0,r0,MSR_VSX@h /* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r0,r0,MSR_VEC@h /* Disable altivec */
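The hunk above extends the lazy-switch mask: _switch clears MSR_FP, and on VSX/AltiVec hardware also MSR_VSX and MSR_VEC, in the suspended thread's MSR, so the first FP or vector instruction the thread issues after resuming traps to the matching *_unavailable handler. A minimal C sketch of the mask construction; the bit values are the standard 64-bit PowerPC MSR positions, and the helper name is made up:

#include <stdint.h>

#define MSR_FP  0x0000000000002000ULL   /* floating-point available */
#define MSR_VEC 0x0000000002000000ULL   /* AltiVec (VMX) available */
#define MSR_VSX 0x0000000000800000ULL   /* VSX available */

/* Illustrative only: mirrors the r0 mask built in _switch. */
static uint64_t lazy_disable_mask(int cpu_has_altivec, int cpu_has_vsx)
{
        uint64_t mask = MSR_FP;          /* li r0,MSR_FP */
        if (cpu_has_vsx)
                mask |= MSR_VSX;         /* oris r0,r0,MSR_VSX@h */
        if (cpu_has_altivec)
                mask |= MSR_VEC;         /* oris r0,r0,MSR_VEC@h */
        return mask;                     /* later cleared out of the MSR */
}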
16 changes: 13 additions & 3 deletions trunk/arch/powerpc/kernel/fpu.S
@@ -57,6 +57,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
_GLOBAL(load_up_fpu)
mfmsr r5
ori r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
oris r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
SYNC
MTMSRD(r5) /* enable use of fpu now */
isync
@@ -73,7 +78,7 @@ _GLOBAL(load_up_fpu)
beq 1f
toreal(r4)
addi r4,r4,THREAD /* want last_task_used_math->thread */
-SAVE_32FPRS(0, r4)
+SAVE_32FPVSRS(0, r5, r4)
mffs fr0
stfd fr0,THREAD_FPSCR(r4)
PPC_LL r5,PT_REGS(r4)
@@ -100,7 +105,7 @@ _GLOBAL(load_up_fpu)
#endif
lfd fr0,THREAD_FPSCR(r5)
MTFSF_L(fr0)
-REST_32FPRS(0, r5)
+REST_32FPVSRS(0, r4, r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
@@ -119,6 +124,11 @@ _GLOBAL(load_up_fpu)
_GLOBAL(giveup_fpu)
mfmsr r5
ori r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
oris r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
SYNC_601
ISYNC_601
MTMSRD(r5) /* enable use of fpu now */
@@ -129,7 +139,7 @@ _GLOBAL(giveup_fpu)
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0
-SAVE_32FPRS(0, r3)
+SAVE_32FPVSRS(0, r4, r3)
mffs fr0
stfd fr0,THREAD_FPSCR(r3)
beq 1f
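With VSX enabled, thread_struct widens each FP slot to hold a full VSR, which is why the SAVE_32FPRS/REST_32FPRS calls above become SAVE_32FPVSRS/REST_32FPVSRS. A hedged C model of the layout those macros imply, assuming TS_FPROFFSET is 0 and TS_VSRLOWOFFSET is 1 as elsewhere in this series; the real macros are assembler feature sections, and the types and names here are illustrative:

#include <stdint.h>

#define NFPREG 32

/* fpr[i][0] is the classic FPR; fpr[i][1] is the VSX low doubleword,
 * meaningful only on VSX CPUs. */
struct fp_state {
        uint64_t fpr[NFPREG][2];
};

/* Save either just the FPR half or the whole VSR, as the feature
 * section inside SAVE_32FPVSRS selects at runtime. */
static void save_32fpvsrs(struct fp_state *dst,
                          const uint64_t live[NFPREG][2], int cpu_has_vsx)
{
        for (int i = 0; i < NFPREG; i++) {
                dst->fpr[i][0] = live[i][0];
                if (cpu_has_vsx)
                        dst->fpr[i][1] = live[i][1];
        }
}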
65 changes: 65 additions & 0 deletions trunk/arch/powerpc/kernel/head_64.S
@@ -278,6 +278,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
. = 0xf20
b altivec_unavailable_pSeries

. = 0xf40
b vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
@@ -297,6 +300,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., performance_monitor)
STD_EXCEPTION_PSERIES(., altivec_unavailable)
STD_EXCEPTION_PSERIES(., vsx_unavailable)

/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -836,6 +840,67 @@ _STATIC(load_up_altivec)
blr
#endif /* CONFIG_ALTIVEC */

.align 7
.globl vsx_unavailable_common
vsx_unavailable_common:
EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
bne .load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .vsx_unavailable_exception
b .ret_from_except

#ifdef CONFIG_VSX
/*
* load_up_vsx(unused, unused, tsk)
* Disable VSX for the task which had it previously,
* and save its vector registers in its thread_struct.
* Reuse the fp and vsx saves, but first check to see if they have
* been saved already.
* On entry: r13 == 'current' && last_task_used_vsx != 'current'
*/
_STATIC(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
andi. r5,r12,MSR_FP
beql+ load_up_fpu /* skip if already loaded */
andis. r5,r12,MSR_VEC@h
beql+ load_up_altivec /* skip if already loaded */

#ifndef CONFIG_SMP
ld r3,last_task_used_vsx@got(r2)
ld r4,0(r3)
cmpdi 0,r4,0
beq 1f
/* Disable VSX for last_task_used_vsx */
addi r4,r4,THREAD
ld r5,PT_REGS(r4)
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r6,MSR_VSX@h
andc r6,r4,r6
std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
ld r4,PACACURRENT(r13)
addi r4,r4,THREAD /* Get THREAD */
li r6,1
stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
/* enable use of VSX after return */
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
#ifndef CONFIG_SMP
/* Update last_task_used_vsx to 'current' */
ld r4,PACACURRENT(r13)
std r4,0(r3)
#endif /* CONFIG_SMP */
b fast_exception_return
#endif /* CONFIG_VSX */

/*
* Hash table stuff
*/
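load_up_vsx leans on the fact that VSX adds no architected register state of its own: VSR0-31 overlay the FPRs plus the VSX low doublewords, and VSR32-63 overlay the VMX registers, so "loading VSX" means making sure both halves are loaded and re-enabling the MSR bit. A hedged C rendering of the handler's flow; the real code is the assembler above, and the struct and helpers are stand-ins:

#include <stdbool.h>
#include <stdint.h>

#define MSR_FP  0x0000000000002000ULL
#define MSR_VEC 0x0000000002000000ULL
#define MSR_VSX 0x0000000000800000ULL

struct task_state {             /* stand-in for thread_struct + pt_regs */
        uint64_t msr;
        int used_vsr;           /* THREAD_USED_VSR in the assembler */
        bool fp_loaded, vec_loaded;
};

static void load_up_fpu_model(struct task_state *t)     { t->fp_loaded = true; }
static void load_up_altivec_model(struct task_state *t) { t->vec_loaded = true; }

static void load_up_vsx_model(struct task_state *t)
{
        if (!(t->msr & MSR_FP))         /* andi. r5,r12,MSR_FP; beql+ */
                load_up_fpu_model(t);
        if (!(t->msr & MSR_VEC))        /* andis. r5,r12,MSR_VEC@h; beql+ */
                load_up_altivec_model(t);
        t->used_vsr = 1;                /* remembered for signals/dumps */
        t->msr |= MSR_VSX;              /* no more 0xf40 traps until switch */
}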
33 changes: 33 additions & 0 deletions trunk/arch/powerpc/kernel/misc_64.S
@@ -506,6 +506,39 @@ _GLOBAL(giveup_altivec)

#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
* giveup_vsx(tsk)
* Disable VSX for the task given as the argument,
* and save the vector registers in its thread_struct.
* Enables VSX for use in the kernel on return.
*/
_GLOBAL(giveup_vsx)
mfmsr r5
oris r5,r5,MSR_VSX@h
mtmsrd r5 /* enable use of VSX now */
isync

cmpdi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
ld r5,PT_REGS(r3)
cmpdi 0,r5,0
beq 1f
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_VSX@h
andc r4,r4,r3 /* disable VSX for previous task */
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
ld r4,last_task_used_vsx@got(r2)
std r5,0(r4)
#endif /* CONFIG_SMP */
blr

#endif /* CONFIG_VSX */

/* kexec_wait(phys_cpu)
*
* wait for the flag to change, indicating this kernel is going away but
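On UP kernels the lazy-ownership bookkeeping mirrors the FP and AltiVec paths: giveup_vsx strips MSR_VSX from the previous owner's saved MSR so its next VSX instruction re-traps, then clears last_task_used_vsx in the #ifndef CONFIG_SMP tail. A bookkeeping-only sketch under those assumptions, omitting the actual register save:

#include <stdint.h>

#define MSR_VSX 0x0000000000800000ULL

struct task { uint64_t saved_msr; };     /* stand-in for pt_regs->msr */

static struct task *last_task_used_vsx;  /* UP only, as in process.c */

/* Sketch of giveup_vsx's effect on the old owner. */
static void giveup_vsx_model(struct task *tsk)
{
        if (tsk)
                tsk->saved_msr &= ~MSR_VSX;  /* andc r4,r4,r3 in the asm */
        last_task_used_vsx = NULL;           /* #ifndef CONFIG_SMP tail */
}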
1 change: 1 addition & 0 deletions trunk/arch/powerpc/kernel/ppc32.h
@@ -120,6 +120,7 @@ struct mcontext32 {
elf_fpregset_t mc_fregs;
unsigned int mc_pad[2];
elf_vrregset_t32 mc_vregs __attribute__((__aligned__(16)));
elf_vsrreghalf_t32 mc_vsregs __attribute__((__aligned__(16)));
};

struct ucontext32 {
3 changes: 3 additions & 0 deletions trunk/arch/powerpc/kernel/ppc_ksyms.c
@@ -102,6 +102,9 @@ EXPORT_SYMBOL(giveup_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
EXPORT_SYMBOL(giveup_vsx);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
EXPORT_SYMBOL(giveup_spe);
#endif /* CONFIG_SPE */
107 changes: 106 additions & 1 deletion trunk/arch/powerpc/kernel/process.c
@@ -53,6 +53,7 @@ extern unsigned long _get_SP(void);
#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

@@ -106,11 +107,23 @@ EXPORT_SYMBOL(enable_kernel_fp);

int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
#ifdef CONFIG_VSX
int i;
elf_fpreg_t *reg;
#endif

if (!tsk->thread.regs)
return 0;
flush_fp_to_thread(current);

#ifdef CONFIG_VSX
reg = (elf_fpreg_t *)fpregs;
for (i = 0; i < ELF_NFPREG - 1; i++, reg++)
*reg = tsk->thread.TS_FPR(i);
memcpy(reg, &tsk->thread.fpscr, sizeof(elf_fpreg_t));
#else
memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
#endif

return 1;
}
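The new #ifdef CONFIG_VSX branch in dump_task_fpu exists because widening fpr[] makes the 32 FPRs non-contiguous: a flat memcpy would now also sweep up the interleaved VSX low halves. A self-contained model of the gather loop, using ELF_NFPREG = 33 (32 FPRs plus the FPSCR) as on powerpc:

#include <stdint.h>
#include <string.h>

#define ELF_NFPREG 33                  /* 32 FPRs + FPSCR */
typedef uint64_t elf_fpreg_t;

/* Assumes the widened layout: fpr[i][0] = FPR, fpr[i][1] = VSX low half. */
static void gather_fpregs(const uint64_t fpr[32][2], uint64_t fpscr,
                          elf_fpreg_t out[ELF_NFPREG])
{
        int i;

        for (i = 0; i < ELF_NFPREG - 1; i++)
                out[i] = fpr[i][0];    /* TS_FPR(i): stride over the VSX half */
        memcpy(&out[i], &fpscr, sizeof(elf_fpreg_t));
}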
@@ -149,7 +162,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
}
}

-int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
+int dump_task_altivec(struct task_struct *tsk, elf_vrreg_t *vrregs)
{
/* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
* separately, see below */
@@ -179,6 +192,80 @@ int dump_task_altivec(struct task_struct *tsk, elf_vrreg_t *vrregs)
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
WARN_ON(preemptible());

#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
giveup_vsx(current);
else
giveup_vsx(NULL); /* just enable vsx for kernel - force */
#else
giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void flush_vsx_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
giveup_vsx(tsk);
}
preempt_enable();
}
}

/*
* This dumps the lower half 64bits of the first 32 VSX registers.
* This needs to be called with dump_task_fp and dump_task_altivec to
* get all the VSX state.
*/
int dump_task_vsx(struct task_struct *tsk, elf_vrreg_t *vrregs)
{
elf_vrreg_t *reg;
double buf[32];
int i;

if (tsk == current)
flush_vsx_to_thread(tsk);

reg = (elf_vrreg_t *)vrregs;

for (i = 0; i < 32 ; i++)
buf[i] = tsk->thread.fpr[i][TS_VSRLOWOFFSET];
memcpy(reg, buf, sizeof(buf));

return 1;
}
#endif /* CONFIG_VSX */

int dump_task_vector(struct task_struct *tsk, elf_vrregset_t *vrregs)
{
int rc = 0;
elf_vrreg_t *regs = (elf_vrreg_t *)vrregs;
#ifdef CONFIG_ALTIVEC
rc = dump_task_altivec(tsk, regs);
if (rc)
return rc;
regs += ELF_NVRREG;
#endif

#ifdef CONFIG_VSX
rc = dump_task_vsx(tsk, regs);
#endif
return rc;
}
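dump_task_vector concatenates the two regsets into one buffer: ELF_NVRREG 128-bit AltiVec slots first, then the 32 VSX low doublewords from dump_task_vsx. A picture of the resulting layout; ELF_NVRREG being 34 (32 VRs plus VSCR and VRSAVE) is inferred from the comment in the diff, not read from the headers:

#include <stdint.h>

#define ELF_NVRREG 34                 /* 32 VRs + VSCR + VRSAVE (see diff) */

typedef struct { uint64_t u[2]; } elf_vrreg_model_t;  /* one 128-bit slot */

/* Byte-for-byte picture of the buffer dump_task_vector fills. */
struct vector_dump_model {
        elf_vrreg_model_t vmx[ELF_NVRREG];  /* written by dump_task_altivec */
        uint64_t vsx_low[32];               /* written by dump_task_vsx */
};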

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
@@ -233,6 +320,10 @@ void discard_lazy_cpu_state(void)
if (last_task_used_altivec == current)
last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
if (last_task_used_vsx == current)
last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
if (last_task_used_spe == current)
last_task_used_spe = NULL;
@@ -297,6 +388,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
* If the previous thread used spe in the last quantum
@@ -317,6 +412,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
if (new->thread.regs && last_task_used_vsx == new)
new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* Avoid the trap. On smp this never happens since
* we don't set last_task_used_spe
@@ -417,6 +516,8 @@ static struct regbit {
{MSR_EE, "EE"},
{MSR_PR, "PR"},
{MSR_FP, "FP"},
{MSR_VEC, "VEC"},
{MSR_VSX, "VSX"},
{MSR_ME, "ME"},
{MSR_IR, "IR"},
{MSR_DR, "DR"},
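The regbit additions mean the MSR pretty-printer in show_regs() now decodes VEC and VSX alongside the existing bits. A minimal stand-alone version of that table walk, restricted to the two new entries; the walker itself is an assumption about surrounding code the hunk does not show:

#include <stdint.h>
#include <stdio.h>

static const struct regbit_model {
        uint64_t bit;
        const char *name;
} msr_bits_model[] = {
        { 0x0000000002000000ULL, "VEC" },   /* MSR_VEC */
        { 0x0000000000800000ULL, "VSX" },   /* MSR_VSX */
};

static void print_msr_bits_model(uint64_t msr)
{
        size_t i;

        for (i = 0; i < sizeof(msr_bits_model) / sizeof(msr_bits_model[0]); i++)
                if (msr & msr_bits_model[i].bit)
                        printf("%s,", msr_bits_model[i].name);
        putchar('\n');
}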
@@ -534,6 +635,7 @@ void prepare_to_copy(struct task_struct *tsk)
{
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
flush_vsx_to_thread(current);
flush_spe_to_thread(current);
}

@@ -689,6 +791,9 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#endif

discard_lazy_cpu_state();
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
(Diff for the remaining 9 changed files was not loaded.)