diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index c4906f73603d9..026ca50ef73ee 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -89,7 +89,7 @@ KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP_NULL(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
 KVM_X86_OP(get_l2_tsc_multiplier)
-KVM_X86_OP(write_l1_tsc_offset)
+KVM_X86_OP(write_tsc_offset)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 14546c30bc635..08773980393d4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1313,8 +1313,7 @@ struct kvm_x86_ops {
 
 	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
 	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
-	/* Returns actual tsc_offset set in active VMCS */
-	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	/*
 	 * Retrieve somewhat arbitrary exit information. Intended to be used
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 95ae2734760eb..623f3c4b795a4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1092,26 +1092,13 @@ static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
 	return kvm_default_tsc_scaling_ratio;
 }
 
-static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u64 g_tsc_offset = 0;
-
-	if (is_guest_mode(vcpu)) {
-		/* Write L1's TSC offset. */
-		g_tsc_offset = svm->vmcb->control.tsc_offset -
-			       svm->vmcb01.ptr->control.tsc_offset;
-		svm->vmcb01.ptr->control.tsc_offset = offset;
-	}
-
-	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-				   svm->vmcb->control.tsc_offset - g_tsc_offset,
-				   offset);
-
-	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+	svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
+	svm->vmcb->control.tsc_offset = offset;
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-
-	return svm->vmcb->control.tsc_offset;
 }
 
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
@@ -4538,7 +4525,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
 	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
-	.write_l1_tsc_offset = svm_write_l1_tsc_offset,
+	.write_tsc_offset = svm_write_tsc_offset,
 
 	.load_mmu_pgd = svm_load_mmu_pgd,
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2ce2c73645bf9..54d08bebf9c62 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1808,26 +1808,9 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
 	return kvm_default_tsc_scaling_ratio;
 }
 
-static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	u64 g_tsc_offset = 0;
-
-	/*
-	 * We're here if L1 chose not to trap WRMSR to TSC. According
-	 * to the spec, this should set L1's TSC; The offset that L1
-	 * set for L2 remains unchanged, and still needs to be added
-	 * to the newly set TSC to get L2's TSC.
-	 */
-	if (is_guest_mode(vcpu) &&
-	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
-		g_tsc_offset = vmcs12->tsc_offset;
-
-	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-				   vcpu->arch.tsc_offset - g_tsc_offset,
-				   offset);
-	vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
-	return offset + g_tsc_offset;
+	vmcs_write64(TSC_OFFSET, offset);
 }
 
 /*
@@ -7723,7 +7706,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
 	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
 	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
-	.write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+	.write_tsc_offset = vmx_write_tsc_offset,
 
 	.load_mmu_pgd = vmx_load_mmu_pgd,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 61024ee9e85f4..b42f6c8674e63 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2359,10 +2359,28 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
 }
 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
 
-static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 {
-	vcpu->arch.l1_tsc_offset = offset;
-	vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
+	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+				   vcpu->arch.l1_tsc_offset,
+				   l1_offset);
+
+	vcpu->arch.l1_tsc_offset = l1_offset;
+
+	/*
+	 * If we are here because L1 chose not to trap WRMSR to TSC then
+	 * according to the spec this should set L1's TSC (as opposed to
+	 * setting L1's offset for L2).
+	 */
+	if (is_guest_mode(vcpu))
+		vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+			l1_offset,
+			static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
+			static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+	else
+		vcpu->arch.tsc_offset = l1_offset;
+
+	static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
}
 
 static inline bool kvm_check_tsc_unstable(void)
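
Note on the combined offset the x86.c hunk computes: with scaling applied before offsetting, running L2 means programming a single hardware offset of (l1_offset * l2_multiplier >> frac_bits) + l2_offset, which is what kvm_calc_nested_tsc_offset() is expected to return. Below is a minimal standalone sketch of that combination, not part of the patch: the 48-bit fraction width (VMX's; SVM differs), the function body, and the mul_s64_u64_shr48() stand-in (modeled on the kernel's mul_s64_u64_shr() helper) are all assumptions for illustration.

/*
 * Standalone sketch of the nested offset combination assumed to live in
 * kvm_calc_nested_tsc_offset(): scale L1's offset by the multiplier L1
 * programmed for L2, then add L2's own offset.  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

/* Assumption: 48 fractional bits, as VMX TSC scaling uses; the kernel
 * keeps the real width in kvm_tsc_scaling_ratio_frac_bits. */
#define FRAC_BITS		48
#define DEFAULT_MULTIPLIER	((uint64_t)1 << FRAC_BITS)

/* Stand-in for the kernel's mul_s64_u64_shr(): signed 64x64 multiply
 * with a 128-bit intermediate, shifted back down by FRAC_BITS. */
static int64_t mul_s64_u64_shr48(int64_t a, uint64_t mul)
{
	return (int64_t)(((__int128)a * (__int128)mul) >> FRAC_BITS);
}

static uint64_t calc_nested_tsc_offset(uint64_t l1_offset, uint64_t l2_offset,
				       uint64_t l2_multiplier)
{
	uint64_t nested = l1_offset;

	/* Skip the 128-bit multiply when L1 does not scale L2's TSC. */
	if (l2_multiplier != DEFAULT_MULTIPLIER)
		nested = (uint64_t)mul_s64_u64_shr48((int64_t)l1_offset,
						     l2_multiplier);

	return nested + l2_offset;
}

int main(void)
{
	/* L1 is offset by 1000 host cycles; it runs L2 at half rate with
	 * an extra offset of 500, so the combined offset is 500 + 500. */
	uint64_t half_rate = DEFAULT_MULTIPLIER / 2;

	printf("combined L2 offset = %llu\n", (unsigned long long)
	       calc_nested_tsc_offset(1000, 500, half_rate));
	return 0;
}

With this shape, the guest-mode branch in kvm_vcpu_write_tsc_offset() folds L1's new offset into whatever L2 offset and multiplier the vendor code reports, so the single value handed to the new write_tsc_offset() callback is always the offset the CPU should apply to the currently active VMCS/VMCB.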