Commit cb49494

---
r: 324005
b: refs/heads/master
c: b1a74bf
h: refs/heads/master
i:
  324003: faac3ac
v: v3
Suresh Siddha authored and H. Peter Anvin committed Sep 21, 2012
1 parent 8346868 commit cb49494
Showing 5 changed files with 41 additions and 16 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a8615af4bc3621cb01096541dafa6f68352ec2d9
+refs/heads/master: b1a74bf8212367be2b1d6685c11a84e056eaaaf1
28 changes: 26 additions & 2 deletions trunk/arch/x86/include/asm/i387.h
@@ -24,8 +24,32 @@ extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 extern void math_state_restore(void);
 
 extern bool irq_fpu_usable(void);
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
+
+/*
+ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
+ * and they don't touch the preempt state on their own.
+ * If you enable preemption after __kernel_fpu_begin(), preempt notifier
+ * should call the __kernel_fpu_end() to prevent the kernel/user FPU
+ * state from getting corrupted. KVM for example uses this model.
+ *
+ * All other cases use kernel_fpu_begin/end() which disable preemption
+ * during kernel FPU usage.
+ */
+extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_end(void);
+
+static inline void kernel_fpu_begin(void)
+{
+        WARN_ON_ONCE(!irq_fpu_usable());
+        preempt_disable();
+        __kernel_fpu_begin();
+}
+
+static inline void kernel_fpu_end(void)
+{
+        __kernel_fpu_end();
+        preempt_enable();
+}
 
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
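The header comment above distinguishes two calling conventions. The sketch below is not part of the commit and is added only as an illustration of how callers are expected to use them; my_simd_copy(), my_vcpu_fpu_load() and my_vcpu_sched_out() are hypothetical names, while kernel_fpu_begin/end(), __kernel_fpu_begin/end(), preempt_disable/enable() and irq_fpu_usable() are the interfaces declared in the hunk above.

#include <asm/i387.h>
#include <linux/preempt.h>

/*
 * Common case: kernel_fpu_begin()/kernel_fpu_end() (the new static
 * inlines) disable preemption around the raw helpers, so a short
 * FPU-using section needs no extra care from the caller.
 */
static void my_simd_copy(void)                  /* hypothetical caller */
{
        kernel_fpu_begin();
        /* ... SSE/AVX work goes here ... */
        kernel_fpu_end();
}

/*
 * KVM-style case: __kernel_fpu_begin() is called with preemption
 * already disabled, preemption is then re-enabled while the guest FPU
 * state stays loaded, and the task's preempt (sched-out) notifier must
 * call __kernel_fpu_end() before another task runs, otherwise the
 * kernel/user FPU state can get corrupted.
 */
static void my_vcpu_fpu_load(void)              /* hypothetical caller */
{
        preempt_disable();
        __kernel_fpu_begin();
        /* ... restore guest FPU registers ... */
        preempt_enable();                       /* FPU section intentionally left open */
}

static void my_vcpu_sched_out(void)             /* hypothetical sched-out hook */
{
        /* ... save guest FPU registers ... */
        __kernel_fpu_end();                     /* balances __kernel_fpu_begin() above */
}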
13 changes: 5 additions & 8 deletions trunk/arch/x86/kernel/i387.c
@@ -73,32 +73,29 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-void kernel_fpu_begin(void)
+void __kernel_fpu_begin(void)
 {
        struct task_struct *me = current;
 
-       WARN_ON_ONCE(!irq_fpu_usable());
-       preempt_disable();
        if (__thread_has_fpu(me)) {
                __save_init_fpu(me);
                __thread_clear_has_fpu(me);
-               /* We do 'stts()' in kernel_fpu_end() */
+               /* We do 'stts()' in __kernel_fpu_end() */
        } else if (!use_eager_fpu()) {
                this_cpu_write(fpu_owner_task, NULL);
                clts();
        }
 }
-EXPORT_SYMBOL(kernel_fpu_begin);
+EXPORT_SYMBOL(__kernel_fpu_begin);
 
-void kernel_fpu_end(void)
+void __kernel_fpu_end(void)
 {
        if (use_eager_fpu())
                math_state_restore();
        else
                stts();
-       preempt_enable();
 }
-EXPORT_SYMBOL(kernel_fpu_end);
+EXPORT_SYMBOL(__kernel_fpu_end);
 
 void unlazy_fpu(struct task_struct *tsk)
 {
10 changes: 7 additions & 3 deletions trunk/arch/x86/kvm/vmx.c
@@ -1493,8 +1493,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-       if (user_has_fpu())
-               clts();
+       /*
+        * If the FPU is not active (through the host task or
+        * the guest vcpu), then restore the cr0.TS bit.
+        */
+       if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
+               stts();
        load_gdt(&__get_cpu_var(host_gdt));
 }
 
@@ -3730,7 +3734,7 @@ static void vmx_set_constant_host_state(void)
        unsigned long tmpl;
        struct desc_ptr dt;
 
-       vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS);  /* 22.2.3 */
+       vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
        vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
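Taken together, the two vmx.c hunks move the cr0.TS decision out of the constant HOST_CR0 image and into software: TS is set on return to the host only when neither the host user task nor the guest vcpu owns the FPU. The condensed view below is an illustration only; maybe_restore_host_ts() is a hypothetical helper, and the condition and calls are the ones from the __vmx_load_host_state() hunk above.

/*
 *   user_has_fpu()   guest_fpu_loaded   action on host reload
 *   -----------------------------------------------------------------
 *        yes                -           leave cr0.TS clear (host task owns the FPU)
 *         -                yes          leave cr0.TS clear (guest FPU stays loaded)
 *        no                no           stts(): the next FPU use traps and reloads lazily
 */
static void maybe_restore_host_ts(struct vcpu_vmx *vmx)   /* hypothetical helper */
{
        if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
                stts();
}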
4 changes: 2 additions & 2 deletions trunk/arch/x86/kvm/x86.c
@@ -5972,7 +5972,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
         */
        kvm_put_guest_xcr0(vcpu);
        vcpu->guest_fpu_loaded = 1;
-       kernel_fpu_begin();
+       __kernel_fpu_begin();
        fpu_restore_checking(&vcpu->arch.guest_fpu);
        trace_kvm_fpu(1);
 }
@@ -5986,7 +5986,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
        vcpu->guest_fpu_loaded = 0;
        fpu_save_init(&vcpu->arch.guest_fpu);
-       kernel_fpu_end();
+       __kernel_fpu_end();
        ++vcpu->stat.fpu_reload;
        kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
        trace_kvm_fpu(0);
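For context on how the raw calls above stay balanced: kvm_load_guest_fpu() opens the FPU section and marks it with guest_fpu_loaded, and kvm_put_guest_fpu() closes it, reached either directly or from KVM's preempt (sched-out) notifier. The call-chain sketch below is simplified; intermediate calls are omitted and the exact call sites may differ by kernel version.

/*
 *   vcpu_enter_guest()
 *     kvm_load_guest_fpu(vcpu)
 *       vcpu->guest_fpu_loaded = 1;
 *       __kernel_fpu_begin();          opened with preemption disabled
 *
 *   ... guest runs; the vcpu task may later be scheduled out ...
 *
 *   preempt notifier, sched-out path
 *     kvm_arch_vcpu_put(vcpu)
 *       kvm_put_guest_fpu(vcpu)
 *         vcpu->guest_fpu_loaded = 0;
 *         __kernel_fpu_end();          balances the __kernel_fpu_begin() above
 */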
