Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 287654
b: refs/heads/master
c: b3b0870
h: refs/heads/master
v: v3
  • Loading branch information
Linus Torvalds committed Feb 16, 2012
1 parent e975dba commit f69ca3d
Show file tree
Hide file tree
Showing 5 changed files with 12 additions and 69 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 6d59d7a9f5b723a7ac1925c136e93ec83c0c3043
refs/heads/master: b3b0870ef3ffed72b92415423da864f440f57ad6
1 change: 0 additions & 1 deletion trunk/arch/x86/include/asm/i387.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
Expand Down
20 changes: 0 additions & 20 deletions trunk/arch/x86/kernel/process_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -299,23 +299,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
bool preload_fpu;

/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

/*
* If the task has used fpu the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
* chances of needing FPU soon are obviously high now
*/
preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

__unlazy_fpu(prev_p);

/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
prefetch(next->fpu.state);

/*
* Reload esp0.
*/
Expand Down Expand Up @@ -354,11 +342,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);

/* If we're going to preload the fpu context, make sure clts
is run while we're batching the cpu state updates. */
if (preload_fpu)
clts();

/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
Expand All @@ -368,9 +351,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
arch_end_context_switch(next_p);

if (preload_fpu)
__math_state_restore();

/*
* Restore %gs if needed (which is common)
*/
Expand Down
23 changes: 0 additions & 23 deletions trunk/arch/x86/kernel/process_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -386,18 +386,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
unsigned fsindex, gsindex;
bool preload_fpu;

/*
* If the task has used fpu the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
* chances of needing FPU soon are obviously high now
*/
preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
prefetch(next->fpu.state);

/*
* Reload esp0, LDT and the page table pointer:
Expand Down Expand Up @@ -430,10 +418,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* Must be after DS reload */
__unlazy_fpu(prev_p);

/* Make sure cpu is ready for new context */
if (preload_fpu)
clts();

/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
Expand Down Expand Up @@ -492,13 +476,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);

/*
* Preload the FPU context, now that we've determined that the
* task is likely to be using it.
*/
if (preload_fpu)
__math_state_restore();

return prev_p;
}

Expand Down
35 changes: 11 additions & 24 deletions trunk/arch/x86/kernel/traps.c
Original file line number Diff line number Diff line change
Expand Up @@ -570,28 +570,6 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * __math_state_restore assumes that cr0.TS is already clear and the
 * fpu state is all ready for use.  Used during context switch; the
 * caller must have executed clts() already (see the has_fpu note at
 * the bottom of this function).
 */
void __math_state_restore(void)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;

/*
 * Paranoid restore: send a SIGSEGV if we fail to restore the state.
 * On failure, re-set cr0.TS via stts() so the (bad) FPU state cannot
 * be used, then kill the task rather than run with corrupt FP state.
 */
if (unlikely(restore_fpu_checking(tsk))) {
stts();
force_sig(SIGSEGV, tsk);
return;
}

__thread_set_has_fpu(thread); /* clts in caller! */
tsk->fpu_counter++; /* recent-FPU-use counter; drives the "preload after 5 timeslices" heuristic */
}

/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
Expand Down Expand Up @@ -622,9 +600,18 @@ void math_state_restore(void)
local_irq_disable();
}

clts(); /* Allow maths ops (or we recurse) */
__thread_fpu_begin(thread);

__math_state_restore();
/*
* Paranoid restore. send a SIGSEGV if we fail to restore the state.
*/
if (unlikely(restore_fpu_checking(tsk))) {
__thread_fpu_end(thread);
force_sig(SIGSEGV, tsk);
return;
}

tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

Expand Down

0 comments on commit f69ca3d

Please sign in to comment.