x86-32, fpu: Rewrite fpu_save_init()
Rewrite fpu_save_init() to prepare for merging with 64-bit.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1283563039-3466-12-git-send-email-brgerst@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Brian Gerst authored and H. Peter Anvin committed Sep 9, 2010
1 parent eec73f8 commit 58a992b
Showing 1 changed file with 22 additions and 25 deletions.
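The net effect of the rewrite is easiest to see with the new function read as a whole rather than interleaved with the removed lines. The following is a condensed paraphrase of the post-patch fpu_save_init() assembled from the diff below (identifiers as in the kernel; surrounding declarations and the FXSAVE-leak workaround tail are omitted), not the verbatim source:

	/* Condensed paraphrase of the rewritten fpu_save_init() (see diff below). */
	static inline void fpu_save_init(struct fpu *fpu)
	{
		if (use_xsave()) {
			fpu_xsave(fpu);
			/* xsave header may indicate the init state of the FP */
			if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
				return;
		} else if (use_fxsr()) {
			fpu_fxsave(fpu);	/* helper added by this patch */
		} else {
			/* legacy FSAVE path: no exception or leak handling needed */
			asm volatile("fsave %[fx]; fwait" : [fx] "=m" (fpu->state->fsave));
			return;
		}

		/* shared tail for the XSAVE/FXSAVE paths */
		if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
			asm volatile("fnclex");
		/* ... followed by the AMD FXSAVE-leak workaround, unchanged by this patch */
	}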
arch/x86/include/asm/i387.h (47 changes: 22 additions & 25 deletions)
@@ -73,6 +73,11 @@ static __always_inline __pure bool use_xsave(void)
 	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+static __always_inline __pure bool use_fxsr(void)
+{
+	return static_cpu_has(X86_FEATURE_FXSR);
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -211,6 +216,12 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 	return 0;
 }
 
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+	asm volatile("fxsave %[fx]"
+		     : [fx] "=m" (fpu->state->fxsave));
+}
+
 /* We need a safe address that is cheap to find and that is already
    in L1 during context switch. The best choices are unfortunately
    different for UP and SMP */
@@ -226,36 +237,24 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 static inline void fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
-		struct xsave_struct *xstate = &fpu->state->xsave;
-		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
 		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
 		 */
-		if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-			goto end;
-
-		if (unlikely(fx->swd & X87_FSW_ES))
-			asm volatile("fnclex");
-
-		/*
-		 * we can do a simple return here or be paranoid :)
-		 */
-		goto clear_state;
+		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+			return;
+	} else if (use_fxsr()) {
+		fpu_fxsave(fpu);
+	} else {
+		asm volatile("fsave %[fx]; fwait"
+			     : [fx] "=m" (fpu->state->fsave));
+		return;
 	}
 
-	/* Use more nops than strictly needed in case the compiler
-	   varies code */
-	alternative_input(
-		"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
-		"fxsave %[fx]\n"
-		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
-		X86_FEATURE_FXSR,
-		[fx] "m" (fpu->state->fxsave),
-		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
-clear_state:
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+		asm volatile("fnclex");
+
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending. Clear the x87 state here by setting it to fixed
 	   values. safe_address is a random variable that should be in L1 */
@@ -265,8 +264,6 @@ static inline void fpu_save_init(struct fpu *fpu)
 		"fildl %[addr]",	/* set F?P to defined value */
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
-end:
-	;
 }
 
 #endif /* CONFIG_X86_64 */