Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 287751
b: refs/heads/master
c: 59cca65
h: refs/heads/master
i:
  287749: 79e215e
  287747: 021073e
  287743: 4f58ce1
v: v3
  • Loading branch information
Dmitry Kasatkin authored and James Morris committed Feb 20, 2012
1 parent 6378e60 commit 34550f6
Show file tree
Hide file tree
Showing 8 changed files with 53 additions and 57 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 7e16838d94b566a17b65231073d179bc04d590c8
refs/heads/master: 59cca653a601372e9b4a430d867377a3e4a36d76
53 changes: 15 additions & 38 deletions trunk/arch/x86/include/asm/i387.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,10 @@ extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern void __math_state_restore(struct task_struct *);
extern void math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
xstateregs_get;
Expand Down Expand Up @@ -270,16 +269,6 @@ static inline int fpu_restore_checking(struct fpu *fpu)

/*
 * Restore tsk's saved FPU image back into the live FPU registers.
 *
 * Returns 0 on success; non-zero if the restore faulted, which callers
 * (e.g. __math_state_restore()) treat as corrupt saved state.
 */
static inline int restore_fpu_checking(struct task_struct *tsk)
{
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. "m" is a random variable that should be in L1 */
alternative_input(
ASM_NOP8 ASM_NOP2,
"emms\n\t" /* clear stack tags */
"fildl %P[addr]", /* set F?P to defined value */
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (tsk->thread.fpu.has_fpu));

return fpu_restore_checking(&tsk->thread.fpu);
}

Expand All @@ -290,21 +279,19 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
*/
/*
 * Non-zero when the FPU registers currently hold this task's state.
 *
 * NOTE(review): this span is a flattened diff -- both the pre-change and
 * post-change return statements appear; the second is unreachable dead
 * code.  The real function contains exactly one of these lines.
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
return tsk->thread.fpu.has_fpu;
return tsk->thread.has_fpu; /* NOTE(review): unreachable diff residue */
}

/* Must be paired with an 'stts' after! */
/*
 * Mark tsk as no longer owning the live FPU registers.
 *
 * NOTE(review): flattened diff -- the fpu.has_fpu + percpu_write() pair
 * is one version of this function and the thread.has_fpu store is the
 * other; the real function contains only one of the two.
 */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
tsk->thread.fpu.has_fpu = 0;
percpu_write(fpu_owner_task, NULL); /* also drop lazy-restore ownership */
tsk->thread.has_fpu = 0;
}

/* Must be paired with a 'clts' before! */
/*
 * Mark tsk as the current owner of the live FPU registers.
 *
 * NOTE(review): flattened diff -- the fpu.has_fpu + percpu_write() pair
 * and the thread.has_fpu store are alternate versions of the same body;
 * the real function contains only one of the two.
 */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
tsk->thread.fpu.has_fpu = 1;
percpu_write(fpu_owner_task, tsk); /* record lazy-restore ownership */
tsk->thread.has_fpu = 1;
}

/*
Expand Down Expand Up @@ -349,36 +336,30 @@ typedef struct { int preload; } fpu_switch_t;
* We don't do that yet, so "fpu_lazy_restore()" always returns
* false, but some day..
*/
/*
 * Can the FPU restore be skipped because this CPU's registers still
 * hold @new's state from the last time it ran here?  True only when
 * @new still owns the per-cpu fpu_owner_task slot and was last
 * scheduled on this CPU.
 *
 * NOTE(review): flattened diff -- both the real function and the
 * stubbed-out macro forms appear.  Because the function-like macro is
 * defined after the function, any later fpu_lazy_restore() call site in
 * this translation unit expands to (0); only one form belongs here.
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
return new == percpu_read_stable(fpu_owner_task) &&
cpu == new->thread.fpu.last_cpu;
}
#define fpu_lazy_restore(tsk) (0)
#define fpu_lazy_state_intact(tsk) do { } while (0)

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
{
fpu_switch_t fpu;

fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
if (__thread_has_fpu(old)) {
if (!__save_init_fpu(old))
cpu = ~0;
old->thread.fpu.last_cpu = cpu;
old->thread.fpu.has_fpu = 0; /* But leave fpu_owner_task! */
if (__save_init_fpu(old))
fpu_lazy_state_intact(old);
__thread_clear_has_fpu(old);
old->fpu_counter++;

/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
new->fpu_counter++;
__thread_set_has_fpu(new);
prefetch(new->thread.fpu.state);
} else
stts();
} else {
old->fpu_counter = 0;
old->thread.fpu.last_cpu = ~0;
if (fpu.preload) {
new->fpu_counter++;
if (fpu_lazy_restore(new, cpu))
if (fpu_lazy_restore(new))
fpu.preload = 0;
else
prefetch(new->thread.fpu.state);
Expand All @@ -396,10 +377,8 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
*/
/*
 * Second half of an FPU context switch: if switch_fpu_prepare() decided
 * to preload the incoming task's FPU state, restore it now.
 *
 * NOTE(review): flattened diff -- the old inline restore-and-check body
 * and the new __math_state_restore() call appear back to back; the real
 * function contains exactly one of the two if-statements.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
if (fpu.preload) {
if (unlikely(restore_fpu_checking(new)))
__thread_fpu_end(new); /* restore faulted: give up FPU ownership */
}
if (fpu.preload)
__math_state_restore(new);
}

/*
Expand Down Expand Up @@ -472,10 +451,8 @@ static inline void kernel_fpu_begin(void)
__save_init_fpu(me);
__thread_clear_has_fpu(me);
/* We do 'stts()' in kernel_fpu_end() */
} else {
percpu_write(fpu_owner_task, NULL);
} else
clts();
}
}

static inline void kernel_fpu_end(void)
Expand Down
3 changes: 1 addition & 2 deletions trunk/arch/x86/include/asm/processor.h
Original file line number Diff line number Diff line change
Expand Up @@ -374,8 +374,6 @@ union thread_xstate {
};

/*
 * Per-task FPU state.
 *
 * NOTE(review): flattened diff -- last_cpu/has_fpu are exactly the
 * fields this commit adds or removes; the surviving struct holds only
 * @state.  Confirm against the commit direction before relying on this.
 */
struct fpu {
unsigned int last_cpu;	/* CPU whose registers last held this state; ~0 = none */
unsigned int has_fpu;	/* non-zero while the live FPU registers hold this state */
union thread_xstate *state;	/* saved extended FPU register image */
};

Expand Down Expand Up @@ -456,6 +454,7 @@ struct thread_struct {
unsigned long trap_no;
unsigned long error_code;
/* floating point and extended processor state */
unsigned long has_fpu;
struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
Expand Down
2 changes: 0 additions & 2 deletions trunk/arch/x86/kernel/cpu/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -1044,8 +1044,6 @@ DEFINE_PER_CPU(char *, irq_stack_ptr) =

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

/*
* Special IST stacks which the CPU switches to when it calls
* an IST-marked descriptor entry. Up to 7 stacks (hardware
Expand Down
3 changes: 1 addition & 2 deletions trunk/arch/x86/kernel/process_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,

task_user_gs(p) = get_user_gs(regs);

p->fpu_counter = 0;
p->thread.io_bitmap_ptr = NULL;
tsk = current;
err = -ENOMEM;
Expand Down Expand Up @@ -304,7 +303,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)

/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

fpu = switch_fpu_prepare(prev_p, next_p, cpu);
fpu = switch_fpu_prepare(prev_p, next_p);

/*
* Reload esp0.
Expand Down
3 changes: 1 addition & 2 deletions trunk/arch/x86/kernel/process_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,

set_tsk_thread_flag(p, TIF_FORK);

p->fpu_counter = 0;
p->thread.io_bitmap_ptr = NULL;

savesegment(gs, p->thread.gsindex);
Expand Down Expand Up @@ -389,7 +388,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
unsigned fsindex, gsindex;
fpu_switch_t fpu;

fpu = switch_fpu_prepare(prev_p, next_p, cpu);
fpu = switch_fpu_prepare(prev_p, next_p);

/*
* Reload esp0, LDT and the page table pointer:
Expand Down
40 changes: 32 additions & 8 deletions trunk/arch/x86/kernel/traps.c
Original file line number Diff line number Diff line change
Expand Up @@ -570,6 +570,37 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * This gets called with the process already owning the
 * FPU state, and with CR0.TS cleared. It just needs to
 * restore the FPU register state.
 *
 * On restore failure the task's FPU ownership is dropped and the task
 * is sent SIGSEGV -- the saved state is treated as corrupt.
 */
void __math_state_restore(struct task_struct *tsk)
{
/* We need a safe address that is cheap to find and that is already
   in L1. We've just brought in "tsk->thread.has_fpu", so use that */
#define safe_address (tsk->thread.has_fpu)
/* NOTE(review): no matching #undef -- safe_address stays defined for
   the rest of this translation unit; confirm that is intentional. */

/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. safe_address is a random variable that should be in L1 */
alternative_input(
ASM_NOP8 ASM_NOP2,
"emms\n\t" /* clear stack tags */
"fildl %P[addr]", /* set F?P to defined value */
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));

/*
 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
 */
if (unlikely(restore_fpu_checking(tsk))) {
__thread_fpu_end(tsk);
force_sig(SIGSEGV, tsk);
return;
}
}

/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
Expand Down Expand Up @@ -600,14 +631,7 @@ void math_state_restore(void)
}

__thread_fpu_begin(tsk);
/*
* Paranoid restore. send a SIGSEGV if we fail to restore the state.
*/
if (unlikely(restore_fpu_checking(tsk))) {
__thread_fpu_end(tsk);
force_sig(SIGSEGV, tsk);
return;
}
__math_state_restore(tsk);

tsk->fpu_counter++;
}
Expand Down
4 changes: 2 additions & 2 deletions trunk/include/linux/digsig.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,15 +30,15 @@ enum digest_algo {

/*
 * Packed header preceding a public key blob.
 *
 * NOTE(review): flattened diff -- both timestamp declarations appear
 * (old time_t, new uint32_t), which would be a duplicate-member error;
 * the real struct has exactly one.  The uint32_t form presumably fixes
 * the packed layout across arches where time_t width differs -- confirm.
 */
struct pubkey_hdr {
uint8_t version; /* key format version */
time_t timestamp; /* key made, always 0 for now */
uint32_t timestamp; /* key made, always 0 for now */
uint8_t algo;
uint8_t nmpi;
char mpi[0];
} __packed;

struct signature_hdr {
uint8_t version; /* signature format version */
time_t timestamp; /* signature made */
uint32_t timestamp; /* signature made */
uint8_t algo;
uint8_t hash;
uint8_t keyid[8];
Expand Down

0 comments on commit 34550f6

Please sign in to comment.