x86: Introduce 'struct fpu' and related API
Currently all fpu state access is through tsk->thread.xstate.  Since we wish
to generalize fpu access to non-task contexts, wrap the state in a new
'struct fpu' and convert existing access to use an fpu API.

Signal frame handlers are not converted to the API since they will remain
task-context-only.
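The point of the wrapper is that an FPU image no longer has to live inside a
task_struct. A minimal sketch of how a non-task user could drive the new API,
assuming a hypothetical vcpu-style container (struct my_vcpu and the my_vcpu_*
helpers are illustrations, not part of this commit; only struct fpu and the
fpu_* calls come from this patch):

/*
 * Illustrative only: a non-task owner of FPU state using the new API.
 */
struct my_vcpu {
        struct fpu guest_fpu;           /* FPU image with no backing task */
};

static int my_vcpu_create(struct my_vcpu *vcpu)
{
        return fpu_alloc(&vcpu->guest_fpu);     /* 0 or -ENOMEM */
}

static void my_vcpu_save(struct my_vcpu *vcpu)
{
        fpu_save_init(&vcpu->guest_fpu);        /* xsave/fxsave into fpu->state */
}

static int my_vcpu_load(struct my_vcpu *vcpu)
{
        return fpu_restore_checking(&vcpu->guest_fpu);
}

static void my_vcpu_destroy(struct my_vcpu *vcpu)
{
        fpu_free(&vcpu->guest_fpu);             /* return state to the slab */
}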

Signed-off-by: Avi Kivity <avi@redhat.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1273135546-29690-3-git-send-email-avi@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Authored by Avi Kivity, committed by H. Peter Anvin on May 10, 2010
Parent: c9ad488; commit: 8660328
Showing 9 changed files with 160 additions and 103 deletions.
arch/x86/include/asm/i387.h (85 additions, 30 deletions)
@@ -16,6 +16,7 @@
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
+#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
@@ -103,10 +104,10 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
values. The kernel data segment can be sometimes 0 and sometimes
new user value. Both should be ok.
Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
{
-struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+struct xsave_struct *xstate = &fpu->state->xsave;
+struct i387_fxsave_struct *fx = &fpu->state->fxsave;

/*
* xsave header may indicate the init state of the FP.
@@ -123,6 +124,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
X86_FEATURE_FXSAVE_LEAK);
}

+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+fpu_clear(&tsk->thread.fpu);
+}
+
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
int err;
@@ -147,7 +153,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
return err;
}

-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
{
/* Using "rex64; fxsave %0" is broken because, if the memory operand
uses any extended registers for addressing, a second REX prefix
@@ -157,42 +163,45 @@ static inline void fxsave(struct task_struct *tsk)
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
: "=m" (tsk->thread.xstate->fxsave));
: "=m" (fpu->state->fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
__asm__ __volatile__("rex64/fxsave %0"
: "=m" (tsk->thread.xstate->fxsave));
: "=m" (fpu->state->fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__("rex64/fxsave (%1)"
: "=m" (tsk->thread.xstate->fxsave)
: "cdaSDb" (&tsk->thread.xstate->fxsave));
: "=m" (fpu->state->fxsave)
: "cdaSDb" (&fpu->state->fxsave));
#endif
}

-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
if (use_xsave())
-xsave(tsk);
+fpu_xsave(fpu);
else
-fxsave(tsk);
+fpu_fxsave(fpu);

-clear_fpu_state(tsk);
+fpu_clear(fpu);
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#else /* CONFIG_X86_32 */

#ifdef CONFIG_MATH_EMULATION
-extern void finit_task(struct task_struct *tsk);
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
-static inline void finit_task(struct task_struct *tsk)
-{
-}
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline void tolerant_fwait(void)
@@ -228,13 +237,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
/*
* These must be called with preempt disabled
*/
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
if (use_xsave()) {
-struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
-struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+struct xsave_struct *xstate = &fpu->state->xsave;
+struct i387_fxsave_struct *fx = &fpu->state->fxsave;

-xsave(tsk);
+fpu_xsave(fpu);

/*
* xsave header may indicate the init state of the FP.
@@ -258,8 +267,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
"fxsave %[fx]\n"
"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
X86_FEATURE_FXSR,
[fx] "m" (tsk->thread.xstate->fxsave),
[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
[fx] "m" (fpu->state->fxsave),
[fsw] "m" (fpu->state->fxsave.swd) : "memory");
clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
@@ -271,17 +280,34 @@ static inline void __save_init_fpu(struct task_struct *tsk)
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));
end:
+;
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}


#endif /* CONFIG_X86_64 */

-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
+return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
if (use_xsave())
-return xrstor_checking(&tsk->thread.xstate->xsave);
+return fpu_xrstor_checking(fpu);
else
-return fxrstor_checking(&tsk->thread.xstate->fxsave);
+return fpu_fxrstor_checking(fpu);
}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+return fpu_restore_checking(&tsk->thread.fpu);
+}

/*
@@ -409,30 +435,59 @@ static inline void clear_fpu(struct task_struct *tsk)
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
-return tsk->thread.xstate->fxsave.cwd;
+return tsk->thread.fpu.state->fxsave.cwd;
} else {
-return (unsigned short)tsk->thread.xstate->fsave.cwd;
+return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
-return tsk->thread.xstate->fxsave.swd;
+return tsk->thread.fpu.state->fxsave.swd;
} else {
-return (unsigned short)tsk->thread.xstate->fsave.swd;
+return (unsigned short)tsk->thread.fpu.state->fsave.swd;
}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
-return tsk->thread.xstate->fxsave.mxcsr;
+return tsk->thread.fpu.state->fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
}

+static bool fpu_allocated(struct fpu *fpu)
+{
+return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+if (fpu_allocated(fpu))
+return 0;
+fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+if (!fpu->state)
+return -ENOMEM;
+WARN_ON((unsigned long)fpu->state & 15);
+return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+if (fpu->state) {
+kmem_cache_free(task_xstate_cachep, fpu->state);
+fpu->state = NULL;
+}
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+memcpy(dst->state, src->state, xstate_size);
+}

#endif /* __ASSEMBLY__ */

#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
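fpu_alloc() draws from the same task_xstate_cachep slab cache that previously
backed tsk->thread.xstate directly, so sizing and alignment are unchanged (the
WARN_ON checks 16-byte alignment). A minimal sketch of the expected
allocate/copy/free lifecycle, assuming a hypothetical fork-style caller
(example_dup_fpu is illustrative; only the fpu_* calls come from this patch):

/* Illustrative only: duplicate src's FPU image into dst. */
static int example_dup_fpu(struct fpu *dst, struct fpu *src)
{
        int err;

        if (!fpu_allocated(src))
                return 0;               /* nothing to duplicate */

        err = fpu_alloc(dst);           /* no-op if dst already has state */
        if (err)
                return err;             /* -ENOMEM */

        fpu_copy(dst, src);             /* memcpy of xstate_size bytes */
        return 0;
}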
arch/x86/include/asm/processor.h (5 additions, 1 deletion)
@@ -380,6 +380,10 @@ union thread_xstate {
struct xsave_struct xsave;
};

+struct fpu {
+union thread_xstate *state;
+};
+
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

@@ -457,7 +461,7 @@ struct thread_struct {
unsigned long trap_no;
unsigned long error_code;
/* floating point and extended processor state */
-union thread_xstate *xstate;
+struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
arch/x86/include/asm/xsave.h (4 additions, 3 deletions)
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
void __user *fpstate,
struct _fpx_sw_bytes *sw);

-static inline int xrstor_checking(struct xsave_struct *fx)
+static inline int fpu_xrstor_checking(struct fpu *fpu)
{
+struct xsave_struct *fx = &fpu->state->xsave;
int err;

asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
: "memory");
}

-static inline void xsave(struct task_struct *tsk)
+static inline void fpu_xsave(struct fpu *fpu)
{
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
: : "D" (&(tsk->thread.xstate->xsave)),
: : "D" (&(fpu->state->xsave)),
"a" (-1), "d"(-1) : "memory");
}
#endif
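The .byte sequence in fpu_xsave() hand-encodes the xsave instruction (opcode
0x0f 0xae /4; ModRM 0x27 selects (%rdi)/(%edi), hence the "D" constraint, and
on 64-bit REX_PREFIX adds a 0x48 REX.W byte) so the file still builds with
assemblers that predate xsave support. With a new enough binutils the same
instruction could be written mnemonically; a sketch under that assumption,
not part of this commit:

/* Equivalent spelling, assuming an assembler that accepts the xsave
 * mnemonic (an assumption the kernel did not make at the time). */
static inline void fpu_xsave_alt(struct fpu *fpu)
{
        /* EDX:EAX form the component mask; all ones = every enabled state */
        asm volatile("xsave %0"
                     : "+m" (fpu->state->xsave)
                     : "a" (-1), "d" (-1)
                     : "memory");
}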
[diffs for the remaining six changed files are not shown]
