KVM: x86: use guest_exit_irqoff
This gains a few clock cycles per vmexit.  On Intel there is no longer
any need to enable interrupts in vmx_handle_external_intr, since we are
using the "acknowledge interrupt on exit" feature.  AMD still needs to
enable interrupts there, and must be careful to avoid the STI interrupt
shadow.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
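For context, "acknowledge interrupt on exit" is a VM-exit control: when it
is set, the CPU acknowledges the external interrupt during the VM exit and
stores its vector in the exit interruption-information field, so the host
can dispatch the handler directly rather than enabling interrupts and
waiting for delivery.  A minimal sketch of the constants involved (values
as in arch/x86/include/asm/vmx.h; quoted for illustration, not part of
this patch):

    /* VM-exit controls bit 15: acknowledge interrupt on exit */
    #define VM_EXIT_ACK_INTR_ON_EXIT	0x00008000
    /* low 8 bits of the exit interruption info: the interrupt vector */
    #define INTR_INFO_VECTOR_MASK	0xff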
Paolo Bonzini committed Jul 1, 2016
1 parent 91fa0f8 commit f2485b3
Showing 3 changed files with 9 additions and 12 deletions.
6 changes: 6 additions & 0 deletions arch/x86/kvm/svm.c
@@ -4935,6 +4935,12 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
 {
 	local_irq_enable();
+	/*
+	 * We must have an instruction with interrupts enabled, so
+	 * the timer interrupt isn't delayed by the interrupt shadow.
+	 */
+	asm("nop");
+	local_irq_disable();
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
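The nop matters because of x86's STI interrupt shadow: STI keeps
interrupts inhibited until the instruction after it has executed, so a
back-to-back enable/disable never opens a delivery window, while a single
intervening instruction does.  A minimal sketch (illustration only, not
part of the patch; STI/CLI are privileged, so a kernel context is
assumed):

    static inline void no_irq_window(void)
    {
    	asm volatile("sti; cli");	/* CLI sits in the shadow: nothing delivered */
    }

    static inline void one_insn_irq_window(void)
    {
    	asm volatile("sti; nop; cli");	/* a pending timer irq lands on the NOP */
    }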
4 changes: 1 addition & 3 deletions arch/x86/kvm/vmx.c
@@ -8574,7 +8574,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
"push %[sp]\n\t"
#endif
"pushf\n\t"
"orl $0x200, (%%" _ASM_SP ")\n\t"
__ASM_SIZE(push) " $%c[cs]\n\t"
"call *%[entry]\n\t"
:
@@ -8587,8 +8586,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
);
} else
local_irq_enable();
}
}

static bool vmx_has_high_real_mode_segbase(void)
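The deleted orl set bit 9 (IF) in the EFLAGS image that the handler's
terminating IRET reloads, so the hand-built interrupt frame used to return
with interrupts enabled.  With the flags pushed unmodified, IRET now
restores IF=0 and the dispatched handler returns with interrupts still
disabled.  For reference (value as in the kernel's processor-flags.h,
quoted for illustration):

    #define X86_EFLAGS_IF	0x00000200	/* Interrupt Flag: EFLAGS bit 9, the 0x200 above */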
11 changes: 2 additions & 9 deletions arch/x86/kvm/x86.c
@@ -6709,16 +6709,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)

 	++vcpu->stat.exits;
 
-	/*
-	 * We must have an instruction between local_irq_enable() and
-	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
-	 * the interrupt shadow. The stat.exits increment will do nicely.
-	 * But we need to prevent reordering, hence this barrier():
-	 */
-	barrier();
-
-	guest_exit();
+	guest_exit_irqoff();
 
+	local_irq_enable();
 	preempt_enable();
 
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
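The cycle savings come from skipping a redundant flags save/restore:
guest_exit() is, roughly (a sketch of the include/linux/context_tracking.h
wrapper of this era, not verbatim), an irq-save shell around
guest_exit_irqoff(), which vcpu_enter_guest can now call directly because
it already runs with interrupts disabled at this point:

    static inline void guest_exit(void)
    {
    	unsigned long flags;

    	local_irq_save(flags);		/* redundant when irqs are already off */
    	guest_exit_irqoff();
    	local_irq_restore(flags);
    }

The old barrier()/stat.exits trick is likewise obsolete: interrupts are
only enabled after guest_exit_irqoff() returns, and the delivery window is
now provided by each vendor's handle_external_intr (the nop added to
svm.c above).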
