powerpc: Prepare for splitting giveup_{fpu, altivec, vsx} in two
This prepares for the decoupling of saving {fpu,altivec,vsx} registers and
marking {fpu,altivec,vsx} as being unused by a thread.

Currently giveup_{fpu,altivec,vsx}() does both; however, optimisations to
task switching can be made if these two operations are decoupled.
save_all() will permit saving the registers to the thread struct while
leaving the facility bits set in the thread's MSR.

This patch introduces no functional change.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
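
As a rough illustration of the split described above, the toy user-space model
below separates "save" (copy the live registers into the thread struct and
leave the MSR facility bit set) from "giveup" (save and additionally clear the
bit, marking the facility unused by the thread). Everything in it (the struct,
the function names and the bit value) is illustrative only and not part of the
patch; it sketches the intended end state, since at this commit save_all()
still reuses the __giveup_*() helpers and therefore changes no behaviour.

/* Toy model of "save" vs "giveup"; not kernel code, all names illustrative. */
#include <stdio.h>
#include <string.h>

#define TOY_MSR_FP (1UL << 13)	/* stand-in for the MSR_FP facility bit */

struct toy_thread {
	unsigned long msr;	/* stand-in for tsk->thread.regs->msr */
	double fpr[32];		/* stand-in for the FP register save area */
};

/* "save": copy live registers into the thread struct, leave MSR_FP set. */
static void toy_save_fpu(struct toy_thread *t, const double *live)
{
	memcpy(t->fpr, live, sizeof(t->fpr));
}

/* "giveup": save, then also mark the facility unused by clearing MSR_FP. */
static void toy_giveup_fpu(struct toy_thread *t, const double *live)
{
	toy_save_fpu(t, live);
	t->msr &= ~TOY_MSR_FP;
}

int main(void)
{
	double live[32] = { 1.0, 2.0 };
	struct toy_thread t = { .msr = TOY_MSR_FP };

	toy_save_fpu(&t, live);
	printf("after save:   MSR_FP %s\n", (t.msr & TOY_MSR_FP) ? "set" : "clear");

	toy_giveup_fpu(&t, live);
	printf("after giveup: MSR_FP %s\n", (t.msr & TOY_MSR_FP) ? "set" : "clear");
	return 0;
}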
Cyril Bur authored and Michael Ellerman committed Mar 2, 2016
1 parent 70fe3d9 commit de2a20a
Showing 3 changed files with 45 additions and 1 deletion.
8 changes: 8 additions & 0 deletions arch/powerpc/include/asm/reg.h
@@ -75,6 +75,14 @@
 #define MSR_HV 0
 #endif
 
+/*
+ * To be used in shared book E/book S, this avoids needing to worry about
+ * book S/book E in shared code
+ */
+#ifndef MSR_SPE
+#define MSR_SPE 0
+#endif
+
 #define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
 #define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */
 #define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
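The reg.h hunk above uses a common fallback-define pattern: on Book3S builds,
where SPE does not exist, MSR_SPE becomes 0, so shared code can test the bit
unconditionally (as the new save_all() in process.c does below) and the
compiler folds the branch away. A minimal standalone sketch of the pattern,
with an illustrative MSR value that is not taken from the patch:

#include <stdio.h>

#ifndef MSR_SPE
#define MSR_SPE 0UL			/* facility absent on this build */
#endif

int main(void)
{
	unsigned long usermsr = 0x2000;	/* illustrative MSR value */

	if (usermsr & MSR_SPE)		/* constant-false when MSR_SPE == 0 */
		printf("would save SPE state here\n");
	else
		printf("MSR_SPE is 0, so the branch compiles away\n");
	return 0;
}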
7 changes: 7 additions & 0 deletions arch/powerpc/include/asm/switch_to.h
@@ -34,6 +34,7 @@ static inline void disable_kernel_fp(void)
 	msr_check_and_clear(MSR_FP);
 }
 #else
+static inline void __giveup_fpu(struct task_struct *t) { }
 static inline void flush_fp_to_thread(struct task_struct *t) { }
 #endif
 
@@ -46,6 +47,8 @@ static inline void disable_kernel_altivec(void)
 {
 	msr_check_and_clear(MSR_VEC);
 }
+#else
+static inline void __giveup_altivec(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_VSX
@@ -57,6 +60,8 @@ static inline void disable_kernel_vsx(void)
 {
 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
+#else
+static inline void __giveup_vsx(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_SPE
@@ -68,6 +73,8 @@ static inline void disable_kernel_spe(void)
 {
 	msr_check_and_clear(MSR_SPE);
 }
+#else
+static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
 static inline void clear_task_ebb(struct task_struct *t)
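The switch_to.h hunks attack the same problem from the other side: each #else
branch gains an empty inline __giveup_*() stub so that common code such as
save_all() can call the helpers unconditionally rather than wrapping every
call in #ifdef. A standalone sketch of the stub pattern follows, assuming
CONFIG_ALTIVEC is not defined in this toy build:

#include <stdio.h>

struct task_struct;				/* opaque in this sketch */

#ifdef CONFIG_ALTIVEC
void __giveup_altivec(struct task_struct *t);	/* real version elsewhere */
#else
static inline void __giveup_altivec(struct task_struct *t) { }	/* no-op stub */
#endif

int main(void)
{
	/* The call compiles whether or not CONFIG_ALTIVEC is set; with the
	 * stub it simply does nothing. */
	__giveup_altivec(NULL);
	printf("stub pattern: call always compiles\n");
	return 0;
}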
31 changes: 30 additions & 1 deletion arch/powerpc/kernel/process.c
@@ -444,12 +444,41 @@ void restore_math(struct pt_regs *regs)
 	regs->msr = msr;
 }
 
+void save_all(struct task_struct *tsk)
+{
+	unsigned long usermsr;
+
+	if (!tsk->thread.regs)
+		return;
+
+	usermsr = tsk->thread.regs->msr;
+
+	if ((usermsr & msr_all_available) == 0)
+		return;
+
+	msr_check_and_set(msr_all_available);
+
+	if (usermsr & MSR_FP)
+		__giveup_fpu(tsk);
+
+	if (usermsr & MSR_VEC)
+		__giveup_altivec(tsk);
+
+	if (usermsr & MSR_VSX)
+		__giveup_vsx(tsk);
+
+	if (usermsr & MSR_SPE)
+		__giveup_spe(tsk);
+
+	msr_check_and_clear(msr_all_available);
+}
+
 void flush_all_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
 		BUG_ON(tsk != current);
-		giveup_all(tsk);
+		save_all(tsk);
 
 #ifdef CONFIG_SPE
 		if (tsk->thread.regs->msr & MSR_SPE)
