powerpc: Create msr_check_and_{set,clear}()
Create helper functions to set and clear MSR bits, each writing the MSR
only when the requested bits are not already in the desired state.
Grouping them will make it easy to avoid the MSR writes in a subsequent
optimisation.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Anton Blanchard authored and Michael Ellerman committed Dec 1, 2015
1 parent 1552cd7 commit a0e72cf
Showing 1 changed file with 52 additions and 55 deletions: arch/powerpc/kernel/process.c
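For context before reading the diff: both new helpers follow the same read, modify, write-only-if-changed pattern. The standalone C sketch below is illustrative only, not kernel code; the fake MSR variable, the mock mfmsr()/mtmsr_isync(), the placeholder MSR_FP/MSR_VEC bit values, and the write counter are assumptions added for demonstration, and the CONFIG_VSX coupling present in the real helpers is left out. It simply shows how the up-front check skips redundant MSR writes.

/*
 * Illustrative userspace sketch, NOT kernel code: models the
 * msr_check_and_set()/msr_check_and_clear() pattern from the diff below
 * with a fake MSR, so the "skip the write when nothing changes"
 * behaviour can be observed. Bit values are placeholders.
 */
#include <stdio.h>

#define MSR_FP  (1UL << 13)	/* placeholder bit positions */
#define MSR_VEC (1UL << 25)

static unsigned long fake_msr;		/* stands in for the real MSR */
static unsigned long msr_writes;	/* counts mtmsr_isync() calls */

static unsigned long mfmsr(void) { return fake_msr; }

static void mtmsr_isync(unsigned long val)
{
	fake_msr = val;
	msr_writes++;
}

static void msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr = oldmsr | bits;

	if (oldmsr != newmsr)		/* write only if a bit actually changes */
		mtmsr_isync(newmsr);
}

static void msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr = oldmsr & ~bits;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}

int main(void)
{
	msr_check_and_set(MSR_FP);	/* FP clear -> one MSR write     */
	msr_check_and_set(MSR_FP);	/* already set -> no write       */
	msr_check_and_clear(MSR_VEC);	/* already clear -> no write     */
	msr_check_and_clear(MSR_FP);	/* set -> one MSR write          */

	printf("MSR writes performed: %lu\n", msr_writes);	/* prints 2 */
	return 0;
}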
@@ -87,23 +87,46 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

-#ifdef CONFIG_PPC_FPU
-void giveup_fpu(struct task_struct *tsk)
+static void msr_check_and_set(unsigned long bits)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;

-	check_if_tm_restore_required(tsk);
+	newmsr = oldmsr | bits;

-	newmsr = oldmsr | MSR_FP;
 #ifdef CONFIG_VSX
-	if (cpu_has_feature(CPU_FTR_VSX))
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
 		newmsr |= MSR_VSX;
 #endif

 	if (oldmsr != newmsr)
 		mtmsr_isync(newmsr);
+}
+
+static void msr_check_and_clear(unsigned long bits)
+{
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
+
+	newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+		newmsr &= ~MSR_VSX;
+#endif
+
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+}
+
+#ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_FP);
 	__giveup_fpu(tsk);
+	msr_check_and_clear(MSR_FP);
 }
 EXPORT_SYMBOL(giveup_fpu);

@@ -144,45 +167,32 @@ void enable_kernel_fp(void)
 {
 	WARN_ON(preemptible());

-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
-		giveup_fpu(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_FP);

-		if (!(oldmsr & MSR_FP))
-			mtmsr_isync(oldmsr | MSR_FP);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		__giveup_fpu(current);
 }
 EXPORT_SYMBOL(enable_kernel_fp);

 #ifdef CONFIG_ALTIVEC
 void giveup_altivec(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);

-	newmsr = oldmsr | MSR_VEC;
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_VEC);
 	__giveup_altivec(tsk);
+	msr_check_and_clear(MSR_VEC);
 }
 EXPORT_SYMBOL(giveup_altivec);

 void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());

-	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
-		giveup_altivec(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_VEC);

-		if (!(oldmsr & MSR_VEC))
-			mtmsr_isync(oldmsr | MSR_VEC);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+		__giveup_altivec(current);
 }
 EXPORT_SYMBOL(enable_kernel_altivec);

@@ -207,34 +217,30 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #ifdef CONFIG_VSX
 void giveup_vsx(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);

-	newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 	if (tsk->thread.regs->msr & MSR_FP)
 		__giveup_fpu(tsk);
 	if (tsk->thread.regs->msr & MSR_VEC)
 		__giveup_altivec(tsk);
 	__giveup_vsx(tsk);
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
 EXPORT_SYMBOL(giveup_vsx);

 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());

-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
-		giveup_vsx(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

-		if (!(oldmsr & MSR_VSX))
-			mtmsr_isync(oldmsr | MSR_VSX);
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+		if (current->thread.regs->msr & MSR_FP)
+			__giveup_fpu(current);
+		if (current->thread.regs->msr & MSR_VEC)
+			__giveup_altivec(current);
+		__giveup_vsx(current);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
@@ -256,31 +262,22 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #ifdef CONFIG_SPE
 void giveup_spe(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);

-	newmsr = oldmsr | MSR_SPE;
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_SPE);
 	__giveup_spe(tsk);
+	msr_check_and_clear(MSR_SPE);
 }
 EXPORT_SYMBOL(giveup_spe);

 void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());

-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
-		giveup_spe(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_SPE);

-		if (!(oldmsr & MSR_SPE))
-			mtmsr_isync(oldmsr | MSR_SPE);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+		__giveup_spe(current);
 }
 EXPORT_SYMBOL(enable_kernel_spe);
