diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b15cde0a9b5c..14b2d033925e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -908,7 +908,8 @@ struct kvm_vcpu_arch {
 	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
 
 	gpa_t time;
-	struct pvclock_vcpu_time_info hv_clock;
+	s8 pvclock_tsc_shift;
+	u32 pvclock_tsc_mul;
 	unsigned int hw_tsc_khz;
 	struct gfn_to_pfn_cache pv_time;
 	/* set guest stopped flag in pvclock flags field */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 597abd5429af..bdc6f449f192 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3170,6 +3170,7 @@ static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
+	struct pvclock_vcpu_time_info hv_clock = {};
 	unsigned long flags, tgt_tsc_khz;
 	unsigned seq;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
@@ -3247,20 +3248,22 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
 		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
-				   &vcpu->hv_clock.tsc_shift,
-				   &vcpu->hv_clock.tsc_to_system_mul);
+				   &vcpu->pvclock_tsc_shift,
+				   &vcpu->pvclock_tsc_mul);
 		vcpu->hw_tsc_khz = tgt_tsc_khz;
 		kvm_xen_update_tsc_info(v);
 	}
 
-	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
-	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
+	hv_clock.tsc_shift = vcpu->pvclock_tsc_shift;
+	hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul;
+	hv_clock.tsc_timestamp = tsc_timestamp;
+	hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
 	vcpu->last_guest_tsc = tsc_timestamp;
 
 	/* If the host uses TSC clocksource, then it is stable */
-	vcpu->hv_clock.flags = 0;
+	hv_clock.flags = 0;
 	if (use_master_clock)
-		vcpu->hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
+		hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
 
 	if (vcpu->pv_time.active) {
 		/*
@@ -3269,24 +3272,24 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		 * is active/enabled.
 		 */
 		if (vcpu->pvclock_set_guest_stopped_request) {
-			vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
+			hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
 			vcpu->pvclock_set_guest_stopped_request = false;
 		}
-		kvm_setup_guest_pvclock(&vcpu->hv_clock, v, &vcpu->pv_time, 0, false);
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0, false);
 
-		vcpu->hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
+		hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
 	}
 
 #ifdef CONFIG_KVM_XEN
 	if (vcpu->xen.vcpu_info_cache.active)
-		kvm_setup_guest_pvclock(&vcpu->hv_clock, v, &vcpu->xen.vcpu_info_cache,
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache,
 					offsetof(struct compat_vcpu_info, time),
 					xen_pvclock_tsc_unstable);
 	if (vcpu->xen.vcpu_time_info_cache.active)
-		kvm_setup_guest_pvclock(&vcpu->hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0,
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0,
 					xen_pvclock_tsc_unstable);
 #endif
-	kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+	kvm_hv_setup_tsc_page(v->kvm, &hv_clock);
 
 	return 0;
 }
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 300a79f1fae5..2801c7bcc2ef 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -176,8 +176,8 @@ static int xen_get_guest_pvclock(struct kvm_vcpu *vcpu,
 	 * Sanity check TSC shift+multiplier to verify the guest's view of time
 	 * is more or less consistent.
 	 */
-	if (hv_clock->tsc_shift != vcpu->arch.hv_clock.tsc_shift ||
-	    hv_clock->tsc_to_system_mul != vcpu->arch.hv_clock.tsc_to_system_mul)
+	if (hv_clock->tsc_shift != vcpu->arch.pvclock_tsc_shift ||
+	    hv_clock->tsc_to_system_mul != vcpu->arch.pvclock_tsc_mul)
 		return -EINVAL;
 
 	return 0;
@@ -2316,8 +2316,8 @@ void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
 
 	entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
 	if (entry) {
-		entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul;
-		entry->edx = vcpu->arch.hv_clock.tsc_shift;
+		entry->ecx = vcpu->arch.pvclock_tsc_mul;
+		entry->edx = vcpu->arch.pvclock_tsc_shift;
 	}
 
 	entry = kvm_find_cpuid_entry_index(vcpu, function, 2);
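
For context, the two fields that remain cached per vCPU encode the standard pvclock conversion from TSC ticks to nanoseconds: shift the TSC delta by tsc_shift, then multiply by tsc_to_system_mul and keep the upper 64 bits of the 96-bit product. The snippet below is a minimal, illustrative-only sketch of that readout as a guest would perform it, following the pvclock ABI documented for MSR_KVM_SYSTEM_TIME_NEW; the helper names are invented for this example and are not kernel code.

#include <stdint.h>

/* Scale a TSC delta to nanoseconds: apply the shift, then ((delta * mul) >> 32). */
static uint64_t pvclock_scale_delta(uint64_t delta, uint32_t tsc_to_system_mul,
				    int8_t tsc_shift)
{
	if (tsc_shift < 0)
		delta >>= -tsc_shift;
	else
		delta <<= tsc_shift;

	/* 64x32 multiply, keep bits [95:32] of the product. */
	return (uint64_t)(((unsigned __int128)delta * tsc_to_system_mul) >> 32);
}

/* Guest time in ns = system_time + scaled (current TSC - tsc_timestamp). */
static uint64_t pvclock_read_ns(uint64_t tsc, uint64_t tsc_timestamp,
				uint64_t system_time,
				uint32_t tsc_to_system_mul, int8_t tsc_shift)
{
	return system_time +
	       pvclock_scale_delta(tsc - tsc_timestamp, tsc_to_system_mul, tsc_shift);
}

Because tsc_shift and tsc_to_system_mul depend only on the effective TSC frequency, caching just those two values per vCPU is sufficient; every other field of pvclock_vcpu_time_info is recomputed on each update, which is why kvm_guest_time_update() can now build the structure in a local hv_clock and pass it to the kvmclock, Xen, and Hyper-V paths.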