Skip to content

Commit

Permalink
KVM: x86: Move TSC offset writes to common code
Browse files Browse the repository at this point in the history
Also, take a spinlock so that the reading of the host TSC and the storing
of the offset can never be separated by preemption.  While the lock is
overkill now, it is useful later in this patch series.

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
  • Loading branch information
Zachary Amsden authored and Avi Kivity committed Oct 24, 2010
1 parent f4e1b3c commit 99e3e30
Show file tree
Hide file tree
Showing 5 changed files with 33 additions and 9 deletions.
3 changes: 3 additions & 0 deletions arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -395,6 +395,7 @@ struct kvm_arch {

unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
spinlock_t tsc_write_lock;

struct kvm_xen_hvm_config xen_hvm_config;

Expand Down Expand Up @@ -521,6 +522,8 @@ struct kvm_x86_ops {

bool (*has_wbinvd_exit)(void);

void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

const struct trace_print_flags *exit_reasons_str;
};

Expand Down
6 changes: 4 additions & 2 deletions arch/x86/kvm/svm.c
Original file line number Diff line number Diff line change
Expand Up @@ -915,7 +915,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
init_vmcb(svm);
svm_write_tsc_offset(&svm->vcpu, 0-native_read_tsc());
kvm_write_tsc(&svm->vcpu, 0);

err = fx_init(&svm->vcpu);
if (err)
Expand Down Expand Up @@ -2581,7 +2581,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)

switch (ecx) {
case MSR_IA32_TSC:
svm_write_tsc_offset(vcpu, data - native_read_tsc());
kvm_write_tsc(vcpu, data);
break;
case MSR_STAR:
svm->vmcb->save.star = data;
Expand Down Expand Up @@ -3551,6 +3551,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_supported_cpuid = svm_set_supported_cpuid,

.has_wbinvd_exit = svm_has_wbinvd_exit,

.write_tsc_offset = svm_write_tsc_offset,
};

static int __init svm_init(void)
Expand Down
13 changes: 6 additions & 7 deletions arch/x86/kvm/vmx.c
Original file line number Diff line number Diff line change
Expand Up @@ -1146,10 +1146,9 @@ static u64 guest_read_tsc(void)
}

/*
* writes 'guest_tsc' into guest's timestamp counter "register"
* guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
* writes 'offset' into guest's timestamp counter offset register
*/
static void vmx_write_tsc_offset(u64 offset)
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	/*
	 * Store the guest TSC offset in the current VMCS; hardware adds
	 * this to the host TSC on guest reads.  vcpu is unused here —
	 * presumably the loaded VMCS already belongs to it (the parameter
	 * exists to match the kvm_x86_ops->write_tsc_offset signature).
	 */
	vmcs_write64(TSC_OFFSET, offset);
}
Expand Down Expand Up @@ -1224,7 +1223,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr;
u64 host_tsc;
int ret = 0;

switch (msr_index) {
Expand Down Expand Up @@ -1254,8 +1252,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_TSC:
rdtscll(host_tsc);
vmx_write_tsc_offset(data - host_tsc);
kvm_write_tsc(vcpu, data);
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
Expand Down Expand Up @@ -2653,7 +2650,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);

vmx_write_tsc_offset(0-native_read_tsc());
kvm_write_tsc(&vmx->vcpu, 0);

return 0;
}
Expand Down Expand Up @@ -4348,6 +4345,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_supported_cpuid = vmx_set_supported_cpuid,

.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

.write_tsc_offset = vmx_write_tsc_offset,
};

static int __init vmx_init(void)
Expand Down
18 changes: 18 additions & 0 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -895,6 +895,22 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

/*
 * kvm_write_tsc - set the guest's TSC to @data (guest-visible value)
 *
 * Computes the vendor-neutral offset (guest_tsc = host_tsc + offset) and
 * hands it to the vendor backend.  The host TSC read and the offset store
 * are done under tsc_write_lock with interrupts disabled so they cannot
 * be separated by preemption; the lock itself is wider protection than
 * strictly needed here (used by later changes in this series).
 */
void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u64 offset;
	unsigned long flags;

	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	/* offset = desired guest TSC minus the host TSC sampled right now */
	offset = data - native_read_tsc();
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	/* Reset of TSC must disable overshoot protection below */
	vcpu->arch.hv_clock.tsc_timestamp = 0;
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
struct timespec ts;
Expand Down Expand Up @@ -5495,6 +5511,8 @@ struct kvm *kvm_arch_create_vm(void)
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

spin_lock_init(&kvm->arch.tsc_write_lock);

return kvm;
}

Expand Down
2 changes: 2 additions & 0 deletions arch/x86/kvm/x86.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,4 +68,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);

void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);

#endif

0 comments on commit 99e3e30

Please sign in to comment.