KVM: nVMX: Rework interception of IRQs and NMIs
Move the check for leaving L2 on pending and intercepted IRQs or NMIs
from the *_allowed handlers into a dedicated callback. Invoke this
callback at the relevant points before KVM checks whether IRQs/NMIs can
be injected. The callback is responsible for switching from L2 to L1 if
needed and for injecting the proper vmexit events.
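
To make the new ordering concrete, here is a small self-contained C model of the
reworked flow. The struct vcpu and the stubbed predicates are illustrative
placeholders, not the kernel's types; only the shape of inject_pending_event()
and the new check_nested_events() callback from the diff below is kept.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy vCPU state: just enough to show the new decision order. */
struct vcpu {
	bool reinject_pending;   /* an earlier event still needs reinjection */
	bool guest_mode;         /* currently running L2 */
	bool nested_run_pending; /* vmlaunch/vmresume not yet completed */
	bool nmi_pending;
	bool pending_irq;
	bool exit_on_nmi;        /* vmcs12 intercepts NMIs */
	bool exit_on_intr;       /* vmcs12 intercepts external interrupts */
};

/* Stand-in for the new ->check_nested_events() callback: emulate the
 * L2->L1 vmexit when L1 intercepts the pending event, or report -EBUSY
 * while the nested VM entry is still in flight. */
static int check_nested_events(struct vcpu *v, bool external_intr)
{
	if (v->nmi_pending && v->exit_on_nmi) {
		if (v->nested_run_pending)
			return -EBUSY;
		printf("nested vmexit: EXCEPTION_NMI -> now running L1\n");
		v->guest_mode = false;
		v->nmi_pending = false;  /* the vmexit counts as the injection */
		return 0;
	}
	if ((v->pending_irq || external_intr) && v->exit_on_intr) {
		if (v->nested_run_pending)
			return -EBUSY;
		printf("nested vmexit: EXTERNAL_INTERRUPT -> now running L1\n");
		v->guest_mode = false;
	}
	return 0;
}

/* Mirrors the reworked inject_pending_event(): reinject first, then give
 * the nested hypervisor a chance to intercept, then inject new events. */
static int inject_pending_event(struct vcpu *v, bool req_int_win)
{
	if (v->reinject_pending)
		return 0;                /* unchanged reinjection path */

	if (v->guest_mode) {
		int r = check_nested_events(v, req_int_win);
		if (r != 0)
			return r;        /* caller requests an immediate exit */
	}

	if (v->nmi_pending)
		printf("inject NMI into %s\n", v->guest_mode ? "L2" : "L1");
	else if (v->pending_irq)
		printf("inject IRQ into %s\n", v->guest_mode ? "L2" : "L1");
	return 0;
}

int main(void)
{
	struct vcpu v = { .guest_mode = true, .nmi_pending = true,
			  .exit_on_nmi = true, .nested_run_pending = true };

	/* Pass 1: nested entry still pending -> request an immediate exit
	 * instead of opening an NMI/IRQ window. */
	if (inject_pending_event(&v, false) != 0)
		printf("immediate exit requested\n");

	/* Pass 2: entry has completed -> the vmexit to L1 happens before
	 * KVM asks whether an NMI can be injected. */
	v.nested_run_pending = false;
	inject_pending_event(&v, false);
	return 0;
}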

The rework fixes L2 wakeups from HLT and provides the foundation for
preemption timer emulation.
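
The HLT fix follows the same pattern: nested_vmx_vmexit() now marks the vCPU
runnable and kvm_arch_vcpu_runnable() polls check_nested_events() first, so a
vCPU halted in L2 is woken when L1 has to intercept a pending IRQ or NMI. A
second toy model, again with placeholder state rather than kernel types:

#include <stdbool.h>
#include <stdio.h>

enum mp_state { MP_STATE_HALTED, MP_STATE_RUNNABLE };

struct vcpu {
	enum mp_state mp_state;  /* HALTED after the guest executed HLT */
	bool guest_mode;         /* the HLT happened in L2 */
	bool pending_irq;
	bool exit_on_intr;       /* vmcs12 intercepts external interrupts */
};

/* Placeholder for ->check_nested_events(): the emulated L2->L1 vmexit
 * also marks the vCPU runnable ("in case we halted in L2"). */
static void check_nested_events(struct vcpu *v)
{
	if (v->pending_irq && v->exit_on_intr) {
		v->guest_mode = false;
		v->mp_state = MP_STATE_RUNNABLE;  /* wake up from HLT */
	}
}

/* Mirrors the new kvm_arch_vcpu_runnable() ordering: poll nested events
 * before looking at mp_state. */
static bool vcpu_runnable(struct vcpu *v)
{
	if (v->guest_mode)
		check_nested_events(v);
	return v->mp_state == MP_STATE_RUNNABLE;
}

int main(void)
{
	struct vcpu v = { .mp_state = MP_STATE_HALTED, .guest_mode = true,
			  .pending_irq = true, .exit_on_intr = true };

	/* The pending, L1-intercepted IRQ now wakes the halted vCPU. */
	printf("runnable: %s\n", vcpu_runnable(&v) ? "yes" : "no");
	return 0;
}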

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Jan Kiszka authored and Paolo Bonzini committed Mar 11, 2014
1 parent b010926 commit b6b8a14
Showing 3 changed files with 59 additions and 36 deletions.
2 changes: 2 additions & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -767,6 +767,8 @@ struct kvm_x86_ops {
 			       enum x86_intercept_stage stage);
 	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
 	bool (*mpx_supported)(void);
+
+	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 };
 
 struct kvm_arch_async_pf {
67 changes: 38 additions & 29 deletions arch/x86/kvm/vmx.c
@@ -4631,22 +4631,8 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_nmi(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
-					  NMI_VECTOR | INTR_TYPE_NMI_INTR |
-					  INTR_INFO_VALID_MASK, 0);
-			/*
-			 * The NMI-triggered VM exit counts as injection:
-			 * clear this one and block further NMIs.
-			 */
-			vcpu->arch.nmi_pending = 0;
-			vmx_set_nmi_mask(vcpu, true);
-			return 0;
-		}
-	}
+	if (to_vmx(vcpu)->nested.nested_run_pending)
+		return 0;
 
 	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
 		return 0;
@@ -4658,19 +4644,8 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_intr(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
-					  0, 0);
-			/*
-			 * fall through to normal code, but now in L1, not L2
-			 */
-		}
-	}
-
-	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+	return (!to_vmx(vcpu)->nested.nested_run_pending &&
+		vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
 		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
@@ -8172,6 +8147,35 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
 	}
 }
 
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
+				  INTR_INFO_VALID_MASK, 0);
+		/*
+		 * The NMI-triggered VM exit counts as injection:
+		 * clear this one and block further NMIs.
+		 */
+		vcpu->arch.nmi_pending = 0;
+		vmx_set_nmi_mask(vcpu, true);
+		return 0;
+	}
+
+	if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
+	    nested_exit_on_intr(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+	}
+
+	return 0;
+}
+
 /*
  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
@@ -8512,6 +8516,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	nested_vmx_succeed(vcpu);
 	if (enable_shadow_vmcs)
 		vmx->nested.sync_shadow_vmcs = true;
+
+	/* in case we halted in L2 */
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 /*
@@ -8652,6 +8659,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.check_intercept = vmx_check_intercept,
 	.handle_external_intr = vmx_handle_external_intr,
 	.mpx_supported = vmx_mpx_supported,
+
+	.check_nested_events = vmx_check_nested_events,
 };
 
 static int __init vmx_init(void)
26 changes: 19 additions & 7 deletions arch/x86/kvm/x86.c
@@ -5821,8 +5821,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static void inject_pending_event(struct kvm_vcpu *vcpu)
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 {
+	int r;
+
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.pending) {
 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
@@ -5832,17 +5834,23 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 				  vcpu->arch.exception.has_error_code,
 				  vcpu->arch.exception.error_code,
 				  vcpu->arch.exception.reinject);
-		return;
+		return 0;
 	}
 
 	if (vcpu->arch.nmi_injected) {
 		kvm_x86_ops->set_nmi(vcpu);
-		return;
+		return 0;
 	}
 
 	if (vcpu->arch.interrupt.pending) {
 		kvm_x86_ops->set_irq(vcpu);
-		return;
+		return 0;
 	}
 
+	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+		if (r != 0)
+			return r;
+	}
+
 	/* try to inject new event if pending */
@@ -5859,6 +5867,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 			kvm_x86_ops->set_irq(vcpu);
 		}
 	}
+	return 0;
 }
 
 static void process_nmi(struct kvm_vcpu *vcpu)
@@ -5963,10 +5972,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			goto out;
 		}
 
-		inject_pending_event(vcpu);
-
+		if (inject_pending_event(vcpu, req_int_win) != 0)
+			req_immediate_exit = true;
 		/* enable NMI/IRQ window open exits if needed */
-		if (vcpu->arch.nmi_pending)
+		else if (vcpu->arch.nmi_pending)
 			req_immediate_exit =
 				kvm_x86_ops->enable_nmi_window(vcpu) != 0;
 		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
@@ -7296,6 +7305,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
+	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
+		kvm_x86_ops->check_nested_events(vcpu, false);
+
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
 		!vcpu->arch.apf.halted)
 		|| !list_empty_careful(&vcpu->async_pf.done)
