KVM: Move main vcpu loop into subarch independent code
This simplifies adding new code and reduces overall code size.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Avi Kivity committed Oct 13, 2007
1 parent 29bd8a7 commit 04d2cc7
Showing 4 changed files with 187 additions and 217 deletions.
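
The structural idea is the classic ops-table split: the arch-independent core (kvm_main.c) now owns the vcpu run loop and drives the per-architecture backend only through the kvm_x86_ops function pointers shown below, which is what lets the duplicated per-backend loops be deleted. A minimal standalone sketch of that shape, using invented toy names rather than the real KVM symbols:

/*
 * Toy sketch of the ops-table pattern this commit relies on: a generic
 * loop that only talks to the backend through function pointers.  All
 * names here (vcpu, arch_ops, toy_*) are invented for illustration.
 */
#include <stdio.h>

struct vcpu {
	int exits;
	int stop_after;			/* when the toy exit handler gives up */
};

struct arch_ops {
	void (*run)(struct vcpu *vcpu);		/* enter the guest once     */
	int  (*handle_exit)(struct vcpu *vcpu);	/* > 0 means re-enter guest */
};

static void toy_run(struct vcpu *vcpu)
{
	vcpu->exits++;			/* pretend we ran the guest and exited */
}

static int toy_handle_exit(struct vcpu *vcpu)
{
	return vcpu->exits < vcpu->stop_after;
}

static const struct arch_ops toy_ops = {
	.run         = toy_run,
	.handle_exit = toy_handle_exit,
};

/* Arch-independent loop, analogous in shape to __vcpu_run() below. */
static int vcpu_loop(const struct arch_ops *ops, struct vcpu *vcpu)
{
	int r;

	do {
		ops->run(vcpu);
		r = ops->handle_exit(vcpu);
	} while (r > 0);

	return r;
}

int main(void)
{
	struct vcpu vcpu = { .exits = 0, .stop_after = 3 };

	vcpu_loop(&toy_ops, &vcpu);
	printf("guest entries: %d\n", vcpu.exits);
	return 0;
}
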
9 changes: 8 additions & 1 deletion drivers/kvm/kvm.h
@@ -453,13 +453,16 @@ struct kvm_x86_ops {
/* Create, but do not attach this VCPU */
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
void (*vcpu_reset)(struct kvm_vcpu *vcpu);

void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*vcpu_decache)(struct kvm_vcpu *vcpu);

int (*set_guest_debug)(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg);
void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
@@ -491,12 +494,16 @@ struct kvm_x86_ops {

void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

-	int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
+	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
void (*patch_hypercall)(struct kvm_vcpu *vcpu,
unsigned char *hypercall_addr);
int (*get_irq)(struct kvm_vcpu *vcpu);
void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
struct kvm_run *run);
};

extern struct kvm_x86_ops *kvm_x86_ops;
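
With the extra hooks in place, each backend (vmx.c, svm.c) only has to supply the building blocks the generic loop calls. A hypothetical, heavily trimmed ops table, assuming the declarations from the kvm.h hunk above; the my_* names are placeholders, not the actual vmx/svm symbols:

/* Hypothetical backend stubs; signatures follow the hooks added above. */
static void my_vcpu_reset(struct kvm_vcpu *vcpu) { /* put the vcpu back into its post-SIPI state */ }
static void my_guest_debug_pre(struct kvm_vcpu *vcpu) { /* arm debug registers before entry */ }
static void my_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { /* enter the guest; now returns void */ }
static int my_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { return 1; /* > 0: let the core re-enter */ }
static void my_inject_pending_irq(struct kvm_vcpu *vcpu) { /* in-kernel irqchip injection path */ }
static void my_inject_pending_vectors(struct kvm_vcpu *vcpu, struct kvm_run *run) { /* userspace irqchip path */ }

static struct kvm_x86_ops my_x86_ops = {
	.vcpu_reset             = my_vcpu_reset,
	.guest_debug_pre        = my_guest_debug_pre,
	.run                    = my_run,
	.handle_exit            = my_handle_exit,
	.inject_pending_irq     = my_inject_pending_irq,
	.inject_pending_vectors = my_inject_pending_vectors,
	/* ... the pre-existing callbacks are unchanged ... */
};
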
124 changes: 123 additions & 1 deletion drivers/kvm/kvm_main.c
@@ -38,6 +38,7 @@
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>

#include <asm/processor.h>
#include <asm/msr.h>
@@ -1970,6 +1971,127 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

/*
* Check if userspace requested an interrupt window, and that the
* interrupt window is open.
*
* No need to exit to userspace if we already have an interrupt queued.
*/
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
return (!vcpu->irq_summary &&
kvm_run->request_interrupt_window &&
vcpu->interrupt_window_open &&
(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
kvm_run->cr8 = get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection = 1;
else
kvm_run->ready_for_interrupt_injection =
(vcpu->interrupt_window_open &&
vcpu->irq_summary == 0);
}

static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;

if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
printk("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, vcpu->sipi_vector);
kvm_lapic_reset(vcpu);
kvm_x86_ops->vcpu_reset(vcpu);
vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
}

preempted:
if (vcpu->guest_debug.enabled)
kvm_x86_ops->guest_debug_pre(vcpu);

again:
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
goto out;

preempt_disable();

kvm_x86_ops->prepare_guest_switch(vcpu);
kvm_load_guest_fpu(vcpu);

local_irq_disable();

if (signal_pending(current)) {
local_irq_enable();
preempt_enable();
r = -EINTR;
kvm_run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
goto out;
}

if (irqchip_in_kernel(vcpu->kvm))
kvm_x86_ops->inject_pending_irq(vcpu);
else if (!vcpu->mmio_read_completed)
kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

vcpu->guest_mode = 1;

if (vcpu->requests)
if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
kvm_x86_ops->tlb_flush(vcpu);

kvm_x86_ops->run(vcpu, kvm_run);

vcpu->guest_mode = 0;
local_irq_enable();

++vcpu->stat.exits;

preempt_enable();

/*
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
kvm_x86_ops->cache_regs(vcpu);
profile_hit(KVM_PROFILING, (void *)vcpu->rip);
}

r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

if (r > 0) {
if (dm_request_for_irq_injection(vcpu, kvm_run)) {
r = -EINTR;
kvm_run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
goto out;
}
if (!need_resched()) {
++vcpu->stat.light_exits;
goto again;
}
}

out:
if (r > 0) {
kvm_resched(vcpu);
goto preempted;
}

post_kvm_run_save(vcpu, kvm_run);

return r;
}


static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
@@ -2017,7 +2139,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_x86_ops->decache_regs(vcpu);
}

-	r = kvm_x86_ops->run(vcpu, kvm_run);
+	r = __vcpu_run(vcpu, kvm_run);

out:
if (vcpu->sigset_active)
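
The other side of this contract is userspace: when __vcpu_run() bails out with -EINTR (a signal, or a requested interrupt window), kvm_vcpu_ioctl_run() propagates that and the VMM decides what to do before re-entering. A rough sketch of such a VMM run loop, assuming vcpu_fd and the mmap'ed kvm_run area are already set up; error handling is deliberately minimal:

#include <errno.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Minimal illustrative VMM loop around the KVM_RUN ioctl. */
static int vmm_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			if (errno == EINTR)
				continue;	/* signal or interrupt window: service it, then re-enter */
			return -errno;
		}

		switch (run->exit_reason) {
		case KVM_EXIT_IO:
			/* emulate the port access described by run->io here */
			break;
		case KVM_EXIT_HLT:
			return 0;		/* guest executed HLT */
		default:
			return -1;		/* unhandled exit reason */
		}
	}
}
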