KVM: x86: hyper-v: Allocate Hyper-V context lazily
Hyper-V context is only needed for guests which use Hyper-V emulation in
KVM (e.g. Windows/Hyper-V guests), so we don't actually need to allocate
it in kvm_arch_vcpu_create(); we can postpone the action until Hyper-V
specific MSRs are accessed or SynIC is enabled.

Once allocated, let's keep the context alive for the lifetime of the vCPU,
as an attempt to free it would require additional synchronization with
other vCPUs, and freeing is normally not supposed to happen anyway.

Note: Hyper-V style hypercall enablement is done by writing to
HV_X64_MSR_GUEST_OS_ID, so we don't need to worry about allocating the
Hyper-V context from kvm_hv_hypercall().

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-15-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Vitaly Kuznetsov authored and Paolo Bonzini committed Feb 9, 2021
1 parent 8f01455 commit fc08b62
Showing 3 changed files with 26 additions and 18 deletions.
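
As context for the diff below, here is a minimal user-space sketch of the lazy-allocation pattern the patch adopts: the per-vCPU Hyper-V context stays NULL until the first Hyper-V MSR access (or SynIC activation) and is then kept for the vCPU's lifetime. The names (struct hv_context, hv_context_init(), hv_set_msr()) are illustrative stand-ins, not the KVM code; the real implementation is in the hyperv.c hunks that follow.

#include <stdlib.h>

/*
 * Illustrative sketch only: hv_context, vcpu, hv_context_init() and
 * hv_set_msr() are stand-ins for the KVM types and helpers in the diff.
 */
struct hv_context {
	int vp_index;		/* per-vCPU Hyper-V state lives here */
};

struct vcpu {
	int idx;
	struct hv_context *hv;	/* NULL until a Hyper-V feature is first used */
};

/* Allocate the context on first use; keep it for the vCPU's lifetime. */
static int hv_context_init(struct vcpu *vcpu)
{
	struct hv_context *hv;

	if (vcpu->hv)
		return 0;	/* already allocated */

	hv = calloc(1, sizeof(*hv));
	if (!hv)
		return -1;	/* the kernel would return -ENOMEM here */

	hv->vp_index = vcpu->idx;
	vcpu->hv = hv;
	return 0;
}

/* MSR-write path: initialize lazily before touching Hyper-V state. */
static int hv_set_msr(struct vcpu *vcpu, unsigned int msr, unsigned long data)
{
	if (!vcpu->hv && hv_context_init(vcpu))
		return 1;	/* mirror the "return 1" error style of the MSR helpers */

	/* ... handle the MSR using vcpu->hv ... */
	(void)msr;
	(void)data;
	return 0;
}

int main(void)
{
	struct vcpu v = { .idx = 0, .hv = NULL };

	/* The first MSR access triggers the allocation; later ones reuse it. */
	hv_set_msr(&v, 0x40000000u, 0);	/* hypothetical MSR index */
	hv_set_msr(&v, 0x40000000u, 1);

	free(v.hv);	/* in KVM the context lives until vCPU destruction */
	return 0;
}
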
33 changes: 25 additions & 8 deletions arch/x86/kvm/hyperv.c
@@ -838,6 +838,9 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	int i;

+	if (!hv_vcpu)
+		return;
+
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 		stimer_cleanup(&hv_vcpu->stimer[i]);

@@ -892,7 +895,7 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
 	stimer_prepare_msg(stimer);
 }

-int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_hv *hv_vcpu;
 	int i;
@@ -910,19 +913,23 @@ int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 		stimer_init(&hv_vcpu->stimer[i], i);

+	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+
 	return 0;
 }

-void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
+int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
 {
-	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+	struct kvm_vcpu_hv_synic *synic;
+	int r;

-	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
-}
+	if (!to_hv_vcpu(vcpu)) {
+		r = kvm_hv_vcpu_init(vcpu);
+		if (r)
+			return r;
+	}

-int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
-{
-	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
+	synic = to_hv_synic(vcpu);

 	/*
 	 * Hyper-V SynIC auto EOI SINT's are
@@ -1479,6 +1486,11 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	if (!host && !vcpu->arch.hyperv_enabled)
 		return 1;

+	if (!to_hv_vcpu(vcpu)) {
+		if (kvm_hv_vcpu_init(vcpu))
+			return 1;
+	}
+
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;

@@ -1497,6 +1509,11 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 	if (!host && !vcpu->arch.hyperv_enabled)
 		return 1;

+	if (!to_hv_vcpu(vcpu)) {
+		if (kvm_hv_vcpu_init(vcpu))
+			return 1;
+	}
+
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;

2 changes: 0 additions & 2 deletions arch/x86/kvm/hyperv.h
@@ -100,8 +100,6 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

-int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
9 changes: 1 addition & 8 deletions arch/x86/kvm/x86.c
@@ -10083,12 +10083,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.pending_external_vector = -1;
 	vcpu->arch.preempted_in_kernel = false;

-	if (kvm_hv_vcpu_init(vcpu))
-		goto free_guest_fpu;
-
 	r = static_call(kvm_x86_vcpu_create)(vcpu);
 	if (r)
-		goto free_hv_vcpu;
+		goto free_guest_fpu;

 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
@@ -10099,8 +10096,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu_put(vcpu);
 	return 0;

-free_hv_vcpu:
-	kvm_hv_vcpu_uninit(vcpu);
 free_guest_fpu:
 	kvm_free_guest_fpu(vcpu);
 free_user_fpu:
@@ -10124,8 +10119,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;

-	kvm_hv_vcpu_postcreate(vcpu);
-
 	if (mutex_lock_killable(&vcpu->mutex))
 		return;
 	vcpu_load(vcpu);
