Commit e9b427a

---
r: 297136
b: refs/heads/master
c: 5d3cb0f
h: refs/heads/master
v: v3
Zachary Amsden authored and Avi Kivity committed Mar 8, 2012
1 parent 5d6310a commit e9b427a
Showing 3 changed files with 31 additions and 16 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: cc578287e3224d0da196cc1d226bdae6b068faa7
+refs/heads/master: 5d3cb0f6a8e3af018a522ae8d36f8f7d2511b5d8
1 change: 1 addition & 0 deletions trunk/arch/x86/include/asm/kvm_host.h
@@ -513,6 +513,7 @@ struct kvm_arch {
        u64 last_tsc_nsec;
        u64 last_tsc_offset;
        u64 last_tsc_write;
+       u32 last_tsc_khz;

        struct kvm_xen_hvm_config xen_hvm_config;

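The new last_tsc_khz field caches the virtual TSC frequency that was in effect at the most recent guest TSC write. For context: KVM presents the guest TSC as guest_tsc = host_tsc + offset, kept per VCPU, so two VCPUs given identical offsets read a synchronized counter. The fragment below is only an illustrative sketch of that relationship; compute_offset is a hypothetical stand-in for kvm_x86_ops->compute_tsc_offset, which in the real code must also account for TSC scaling.

#include <stdint.h>

/* Illustration only, not kernel code: with hardware TSC offsetting,
 * a guest RDTSC returns host_tsc + offset. Picking
 * offset = data - host_tsc(now) makes the guest TSC read 'data' now;
 * reusing that exact offset on a second VCPU keeps the two in sync. */
static inline uint64_t compute_offset(uint64_t data, uint64_t host_tsc_now)
{
        return data - host_tsc_now;     /* u64 arithmetic wraps benignly */
}

static inline uint64_t guest_tsc(uint64_t host_tsc, uint64_t offset)
{
        return host_tsc + offset;
}

This is why the x86.c hunk below prefers reusing last_tsc_offset over recomputing it: a freshly computed offset would differ by however many host cycles elapsed between the two writes. last_tsc_khz guards that reuse, since an offset computed under a different virtual TSC frequency would no longer line up.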
44 changes: 29 additions & 15 deletions trunk/arch/x86/kvm/x86.c
@@ -1025,40 +1025,54 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
        struct kvm *kvm = vcpu->kvm;
        u64 offset, ns, elapsed;
        unsigned long flags;
-       s64 sdiff;
+       s64 nsdiff;

        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
        offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
-       sdiff = data - kvm->arch.last_tsc_write;
-       if (sdiff < 0)
-               sdiff = -sdiff;
+
+       /* n.b - signed multiplication and division required */
+       nsdiff = data - kvm->arch.last_tsc_write;
+#ifdef CONFIG_X86_64
+       nsdiff = (nsdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+#else
+       /* do_div() only does unsigned */
+       asm("idivl %2; xor %%edx, %%edx"
+           : "=A"(nsdiff)
+           : "A"(nsdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+#endif
+       nsdiff -= elapsed;
+       if (nsdiff < 0)
+               nsdiff = -nsdiff;

        /*
-        * Special case: close write to TSC within 5 seconds of
-        * another CPU is interpreted as an attempt to synchronize
-        * The 5 seconds is to accommodate host load / swapping as
-        * well as any reset of TSC during the boot process.
-        *
-        * In that case, for a reliable TSC, we can match TSC offsets,
-        * or make a best guest using elapsed value.
-        */
-       if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
-           elapsed < 5ULL * NSEC_PER_SEC) {
+        * Special case: TSC write with a small delta (1 second) of virtual
+        * cycle time against real time is interpreted as an attempt to
+        * synchronize the CPU.
+        *
+        * For a reliable TSC, we can match TSC offsets, and for an unstable
+        * TSC, we add elapsed time in this computation. We could let the
+        * compensation code attempt to catch up if we fall behind, but
+        * it's better to try to match offsets from the beginning.
+        */
+       if (nsdiff < NSEC_PER_SEC &&
+           vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
                if (!check_tsc_unstable()) {
                        offset = kvm->arch.last_tsc_offset;
                        pr_debug("kvm: matched tsc offset for %llu\n", data);
                } else {
                        u64 delta = nsec_to_cycles(vcpu, elapsed);
-                       offset += delta;
+                       data += delta;
+                       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                ns = kvm->arch.last_tsc_nsec;
        }
        kvm->arch.last_tsc_nsec = ns;
        kvm->arch.last_tsc_write = data;
        kvm->arch.last_tsc_offset = offset;
+       kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
        kvm_x86_ops->write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

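Putting the heuristic in one place: a write "matches" when the guest-cycle delta since the last write, converted to time, lands within one second of the wall-clock time that actually elapsed, and the virtual TSC frequency is unchanged. The stand-alone model below is a sketch under stated assumptions: tsc_record, cycles_to_ns and tsc_write_matches are invented names, only the match test itself is modeled, and cycles are converted to nanoseconds as cycles * 1,000,000 / khz, since virtual_tsc_khz counts cycles per millisecond. (By the same arithmetic, the hunk's (nsdiff * 1000) / khz works out to microseconds rather than nanoseconds; later mainline code acknowledges this by naming the variable usdiff.)

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct tsc_record {                     /* mirrors the last_tsc_* fields */
        uint64_t write;                 /* guest TSC value last written */
        uint64_t nsec;                  /* kernel time of that write, in ns */
        uint32_t khz;                   /* virtual TSC frequency in kHz */
};

/* cycles -> ns; khz is cycles per millisecond. The multiply can overflow
 * for very large deltas; fine for the small examples used here. */
static int64_t cycles_to_ns(int64_t cycles, uint32_t khz)
{
        return cycles * 1000000LL / khz;
}

/* Return 1 when a new TSC write should reuse the cached offset. */
static int tsc_write_matches(const struct tsc_record *last,
                             uint64_t data, uint64_t now_ns, uint32_t khz)
{
        int64_t nsdiff = cycles_to_ns((int64_t)(data - last->write), khz);
        int64_t elapsed = (int64_t)(now_ns - last->nsec);

        nsdiff -= elapsed;
        if (nsdiff < 0)
                nsdiff = -nsdiff;

        return nsdiff < NSEC_PER_SEC && khz == last->khz;
}

int main(void)
{
        /* first VCPU wrote TSC = 0 at t = 0 on a 2,000,000 kHz (2 GHz) guest */
        struct tsc_record last = { .write = 0, .nsec = 0, .khz = 2000000 };

        /* second VCPU writes 100 ms later with 100 ms worth of cycles */
        uint64_t now = 100000000ULL;    /* 100 ms in ns */
        uint64_t data = 200000000ULL;   /* 2,000,000 cycles/ms * 100 ms */

        printf("match: %d\n", tsc_write_matches(&last, data, now, 2000000));
        return 0;
}

On these example values this prints match: 1; moving data a full second's worth of cycles away, or passing a changed khz, flips the result to 0, which in the patch corresponds to keeping the freshly computed offset rather than reusing the cached one.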
