Commit 0e566b9

---
r: 125182
b: refs/heads/master
c: 3b86cd9
h: refs/heads/master
v: v3
Jan Kiszka authored and Avi Kivity committed Dec 31, 2008
1 parent 9bdc771 commit 0e566b9
Showing 2 changed files with 116 additions and 60 deletions.
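
In short, this patch makes NMI injection work on VMX hardware that lacks virtual-NMI (vNMI) support. The NMI-blocked state is tracked in software through a new soft_vnmi_blocked flag, the guest time spent in that state is accumulated across VM entries via entry_time and vnmi_blocked_time, and the blocked state is forcibly cleared, with a kernel warning, after one second of waiting in vain for an open IRQ window.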
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 487b391d6ea9b1d0e2e0440466fb3130e78c98d9
refs/heads/master: 3b86cd9967242f3f3d775ee015fb814a349ed5e6
174 changes: 115 additions & 59 deletions trunk/arch/x86/kvm/vmx.c
@@ -90,6 +90,11 @@ struct vcpu_vmx {
        } rmode;
        int vpid;
        bool emulation_required;

        /* Support for vnmi-less CPUs */
        int soft_vnmi_blocked;          /* set while an injected NMI awaits the next IRQ window */
        ktime_t entry_time;             /* timestamp of the last VM entry */
        s64 vnmi_blocked_time;          /* guest ns accumulated while soft-blocked */
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -2230,6 +2235,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)

        vmx->vcpu.arch.rmode.active = 0;

        vmx->soft_vnmi_blocked = 0;

        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        kvm_set_cr8(&vmx->vcpu, 0);
        msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
@@ -2335,6 +2342,29 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        return ret;
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
        u32 cpu_based_vm_exec_control;

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
        u32 cpu_based_vm_exec_control;

        if (!cpu_has_virtual_nmis()) {
                enable_irq_window(vcpu);
                return;
        }

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
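
/*
 * Note the fallback above: a CPU without virtual NMIs cannot deliver an
 * NMI-window exit, so requesting an IRQ-window exit is the closest safe
 * approximation; the soft_vnmi_blocked logic below builds on it.
 */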

static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2360,6 +2390,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!cpu_has_virtual_nmis()) {
                /*
                 * Tracking the NMI-blocked state in software is built upon
                 * finding the next open IRQ window. This, in turn, depends
                 * on well-behaved guests: they have to keep IRQs disabled
                 * at least as long as the NMI handler runs. Otherwise we
                 * may cause NMI nesting, possibly breaking the guest. But
                 * as this is highly unlikely, we can live with the
                 * residual risk.
                 */
                vmx->soft_vnmi_blocked = 1;
                vmx->vnmi_blocked_time = 0;
        }

        ++vcpu->stat.nmi_injections;
        if (vcpu->arch.rmode.active) {
                vmx->rmode.irq.pending = true;
@@ -2384,6 +2427,8 @@ static void vmx_update_window_states(struct kvm_vcpu *vcpu)
                !(guest_intr & (GUEST_INTR_STATE_STI |
                                GUEST_INTR_STATE_MOV_SS |
                                GUEST_INTR_STATE_NMI));
        if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
                vcpu->arch.nmi_window_open = 0;

        vcpu->arch.interrupt_window_open =
                ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
@@ -2403,55 +2448,31 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
        kvm_queue_interrupt(vcpu, irq);
}

/* Removed from here (both helpers now live above vmx_inject_irq, with the
 * vnmi-less fallback added to enable_nmi_window): */
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
        u32 cpu_based_vm_exec_control;

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
        u32 cpu_based_vm_exec_control;

        if (!cpu_has_virtual_nmis())
                return;

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                  struct kvm_run *kvm_run)
{
        vmx_update_window_states(vcpu);

        /* Old body, removed: */
        if (cpu_has_virtual_nmis()) {
                if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
                        if (vcpu->arch.nmi_window_open) {
                                vcpu->arch.nmi_pending = false;
                                vcpu->arch.nmi_injected = true;
                        } else {
                                enable_nmi_window(vcpu);
                                return;
                        }
                }
                if (vcpu->arch.nmi_injected) {
                        vmx_inject_nmi(vcpu);
                        if (vcpu->arch.nmi_pending
                            || kvm_run->request_nmi_window)
                                enable_nmi_window(vcpu);
                        else if (vcpu->arch.irq_summary
                                 || kvm_run->request_interrupt_window)
                                enable_irq_window(vcpu);
                        return;
                }
                if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window)
                        enable_nmi_window(vcpu);
        }

        /* New body, with the cpu_has_virtual_nmis() guard dropped: */
        if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
                if (vcpu->arch.nmi_window_open) {
                        vcpu->arch.nmi_pending = false;
                        vcpu->arch.nmi_injected = true;
                } else {
                        enable_nmi_window(vcpu);
                        return;
                }
        }
        if (vcpu->arch.nmi_injected) {
                vmx_inject_nmi(vcpu);
                if (vcpu->arch.nmi_pending || kvm_run->request_nmi_window)
                        enable_nmi_window(vcpu);
                else if (vcpu->arch.irq_summary
                         || kvm_run->request_interrupt_window)
                        enable_irq_window(vcpu);
                return;
        }
        if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window)
                enable_nmi_window(vcpu);

        if (vcpu->arch.interrupt_window_open) {
                if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
@@ -3097,6 +3118,37 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                printk(KERN_WARNING "%s: unexpected, valid vectoring info "
                       "(0x%x) and exit reason is 0x%x\n",
                       __func__, vectoring_info, exit_reason);

        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
                if (vcpu->arch.interrupt_window_open) {
                        vmx->soft_vnmi_blocked = 0;
                        vcpu->arch.nmi_window_open = 1;
                } else if (vmx->vnmi_blocked_time > 1000000000LL &&
                           (kvm_run->request_nmi_window || vcpu->arch.nmi_pending)) {
                        /*
                         * This CPU doesn't help us find the end of an
                         * NMI-blocked window if the guest runs with IRQs
                         * disabled. So we pull the trigger after 1 s of
                         * futile waiting, but inform the user about this.
                         */
                        printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
                               "state on VCPU %d after 1 s timeout\n",
                               __func__, vcpu->vcpu_id);
                        vmx->soft_vnmi_blocked = 0;
                        vmx->vcpu.arch.nmi_window_open = 1;
                }

                /*
                 * If user space is waiting to inject an NMI, exit as soon
                 * as possible.
                 */
                if (vcpu->arch.nmi_window_open && kvm_run->request_nmi_window
                    && !vcpu->arch.nmi_pending) {
                        kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN;
                        ++vcpu->stat.nmi_window_exits;
                        return 0;
                }
        }

        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
@@ -3146,7 +3198,9 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
                if (unblock_nmi && vector != DF_VECTOR)
                        vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                      GUEST_INTR_STATE_NMI);
        /* was: */
        }
        /* now: */
        } else if (unlikely(vmx->soft_vnmi_blocked))
                vmx->vnmi_blocked_time +=
                        ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
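        /*
         * Together with the entry_time stamp taken in vmx_vcpu_run() below,
         * this accumulates only the time actually spent inside the guest
         * while NMIs were soft-blocked, i.e. the guest's net vcpu time.
         */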

        idt_vectoring_info = vmx->idt_vectoring_info;
        idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
@@ -3186,27 +3240,25 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)

        vmx_update_window_states(vcpu);

        /* Old body, removed: */
        if (cpu_has_virtual_nmis()) {
                if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
                        if (vcpu->arch.interrupt.pending) {
                                enable_nmi_window(vcpu);
                        } else if (vcpu->arch.nmi_window_open) {
                                vcpu->arch.nmi_pending = false;
                                vcpu->arch.nmi_injected = true;
                        } else {
                                enable_nmi_window(vcpu);
                                return;
                        }
                }
                if (vcpu->arch.nmi_injected) {
                        vmx_inject_nmi(vcpu);
                        if (vcpu->arch.nmi_pending)
                                enable_nmi_window(vcpu);
                        else if (kvm_cpu_has_interrupt(vcpu))
                                enable_irq_window(vcpu);
                        return;
                }
        }

        /* New body, guard dropped as in do_interrupt_requests(): */
        if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
                if (vcpu->arch.interrupt.pending) {
                        enable_nmi_window(vcpu);
                } else if (vcpu->arch.nmi_window_open) {
                        vcpu->arch.nmi_pending = false;
                        vcpu->arch.nmi_injected = true;
                } else {
                        enable_nmi_window(vcpu);
                        return;
                }
        }
        if (vcpu->arch.nmi_injected) {
                vmx_inject_nmi(vcpu);
                if (vcpu->arch.nmi_pending)
                        enable_nmi_window(vcpu);
                else if (kvm_cpu_has_interrupt(vcpu))
                        enable_irq_window(vcpu);
                return;
        }

        if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
                if (vcpu->arch.interrupt_window_open)
                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
@@ -3255,6 +3307,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info;

        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
                vmx->entry_time = ktime_get();

        /* Handle invalid guest state instead of entering VMX */
        if (vmx->emulation_required && emulate_invalid_guest_state) {
                handle_invalid_guest_state(vcpu, kvm_run);
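
To make the new bookkeeping easier to follow in isolation, here is a minimal user-space sketch of the soft-vNMI state machine this diff introduces. It is an illustration only, not kernel code: the struct, helper names, and timing harness are invented here, while the roles of the three fields, the accumulate-across-entries logic, and the one-second timeout mirror the patch above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the three fields added to struct vcpu_vmx. */
struct soft_vnmi {
        bool    blocked;     /* soft_vnmi_blocked: an injected NMI is in flight */
        int64_t blocked_ns;  /* vnmi_blocked_time: guest ns spent blocked */
        int64_t entry_ns;    /* entry_time: stamp taken just before VM entry */
};

static int64_t monotonic_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* cf. vmx_vcpu_run: record the entry time only while soft-blocked. */
static void pre_entry(struct soft_vnmi *s)
{
        if (s->blocked)
                s->entry_ns = monotonic_ns();
}

/* cf. vmx_complete_interrupts: accumulate net guest time. */
static void post_exit(struct soft_vnmi *s)
{
        if (s->blocked)
                s->blocked_ns += monotonic_ns() - s->entry_ns;
}

/*
 * cf. kvm_handle_exit: unblock when an IRQ window opens, or force the
 * window open after one second of futile waiting (with a warning).
 */
static bool check_unblock(struct soft_vnmi *s, bool irq_window_open)
{
        if (irq_window_open) {
                s->blocked = false;
                return true;
        }
        if (s->blocked_ns > 1000000000LL) {
                fprintf(stderr, "breaking out of NMI-blocked state after 1 s\n");
                s->blocked = false;
                return true;
        }
        return false;
}

int main(void)
{
        struct soft_vnmi s = { .blocked = true }; /* as set by vmx_inject_nmi */

        pre_entry(&s);
        /* ... guest would run here with IRQs disabled ... */
        post_exit(&s);
        printf("soft-blocked for %lld ns so far\n", (long long)s.blocked_ns);
        return check_unblock(&s, true) ? 0 : 1;
}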
