Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 297135
b: refs/heads/master
c: cc57828
h: refs/heads/master
i:
  297133: 02e49f8
  297131: 8776919
  297127: bab764a
  297119: 61458c0
v: v3
  • Loading branch information
Zachary Amsden authored and Avi Kivity committed Mar 8, 2012
1 parent 3b40b03 commit 5d6310a
Show file tree
Hide file tree
Showing 6 changed files with 72 additions and 59 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: a59cb29e4d81e025192550c2703f305637f016f6
refs/heads/master: cc578287e3224d0da196cc1d226bdae6b068faa7
9 changes: 5 additions & 4 deletions trunk/arch/x86/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -422,10 +422,11 @@ struct kvm_vcpu_arch {
u64 last_kernel_ns;
u64 last_tsc_nsec;
u64 last_tsc_write;
u32 virtual_tsc_khz;
bool tsc_catchup;
u32 tsc_catchup_mult;
s8 tsc_catchup_shift;
bool tsc_always_catchup;
s8 virtual_tsc_shift;
u32 virtual_tsc_mult;
u32 virtual_tsc_khz;

atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
unsigned nmi_pending; /* NMI queued after currently running handler */
Expand Down Expand Up @@ -651,7 +652,7 @@ struct kvm_x86_ops {

bool (*has_wbinvd_exit)(void);

void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
Expand Down
2 changes: 1 addition & 1 deletion trunk/arch/x86/kvm/lapic.c
Original file line number Diff line number Diff line change
Expand Up @@ -731,7 +731,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
u64 ns = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
unsigned long this_tsc_khz = vcpu_tsc_khz(vcpu);
unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
unsigned long flags;

if (unlikely(!tscdeadline || !this_tsc_khz))
Expand Down
20 changes: 12 additions & 8 deletions trunk/arch/x86/kvm/svm.c
Original file line number Diff line number Diff line change
Expand Up @@ -964,20 +964,25 @@ static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
return _tsc;
}

static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 ratio;
u64 khz;

/* TSC scaling supported? */
if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR))
/* Guest TSC same frequency as host TSC? */
if (!scale) {
svm->tsc_ratio = TSC_RATIO_DEFAULT;
return;
}

/* TSC-Scaling disabled or guest TSC same frequency as host TSC? */
if (user_tsc_khz == 0) {
vcpu->arch.virtual_tsc_khz = 0;
svm->tsc_ratio = TSC_RATIO_DEFAULT;
/* TSC scaling supported? */
if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
if (user_tsc_khz > tsc_khz) {
vcpu->arch.tsc_catchup = 1;
vcpu->arch.tsc_always_catchup = 1;
} else
WARN(1, "user requested TSC rate below hardware speed\n");
return;
}

Expand All @@ -992,7 +997,6 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
user_tsc_khz);
return;
}
vcpu->arch.virtual_tsc_khz = user_tsc_khz;
svm->tsc_ratio = ratio;
}

Expand Down
16 changes: 11 additions & 5 deletions trunk/arch/x86/kvm/vmx.c
Original file line number Diff line number Diff line change
Expand Up @@ -1817,13 +1817,19 @@ u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu)
}

/*
 * Engage any workarounds for mis-matched TSC rates.  Currently limited to
 * software catchup for faster rates on slower CPUs.
 */
static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	/* Guest TSC matches host TSC frequency: nothing to compensate. */
	if (!scale)
		return;

	/*
	 * VMX has no hardware TSC ratio scaling; a faster-than-host guest
	 * rate can only be approximated by software catchup.  A slower rate
	 * cannot be compensated at all here.
	 */
	if (user_tsc_khz > tsc_khz) {
		vcpu->arch.tsc_catchup = 1;
		vcpu->arch.tsc_always_catchup = 1;
	} else
		WARN(1, "user requested TSC rate below hardware speed\n");
}

/*
Expand Down
82 changes: 42 additions & 40 deletions trunk/arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,10 @@ EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
Expand Down Expand Up @@ -968,49 +972,50 @@ static inline u64 get_kernel_ns(void)
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;

static inline int kvm_tsc_changes_freq(void)
/* Convert nanoseconds to guest TSC cycles at the vcpu's virtual TSC rate. */
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
/* Scale a khz rate by ppm parts-per-million (ppm may be negative). */
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	/* Widen to 64 bits before multiplying to avoid u32 overflow. */
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
/*
 * Record the requested guest TSC frequency on the vcpu and decide whether
 * hardware scaling / software catchup is needed to honor it.
 */
static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = this_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide if the
	 * rate being applied is within that bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}

/*
 * Compute the guest's TSC value at kernel time kernel_ns: scale the
 * elapsed nanoseconds since the last TSC write to guest cycles and add
 * them to the last written TSC value.
 */
static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	u64 tsc = pvclock_scale_delta(kernel_ns - vcpu->arch.last_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
	tsc += vcpu->arch.last_tsc_write;
	return tsc;
}
Expand Down Expand Up @@ -1077,7 +1082,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_save(flags);
tsc_timestamp = kvm_x86_ops->read_l1_tsc(v);
kernel_ns = get_kernel_ns();
this_tsc_khz = vcpu_tsc_khz(v);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
Expand Down Expand Up @@ -2804,26 +2809,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
u32 user_tsc_khz;

r = -EINVAL;
if (!kvm_has_tsc_control)
break;

user_tsc_khz = (u32)arg;

if (user_tsc_khz >= kvm_max_guest_tsc_khz)
goto out;

kvm_x86_ops->set_tsc_khz(vcpu, user_tsc_khz);
if (user_tsc_khz == 0)
user_tsc_khz = tsc_khz;

kvm_set_tsc_khz(vcpu, user_tsc_khz);

r = 0;
goto out;
}
case KVM_GET_TSC_KHZ: {
r = -EIO;
if (check_tsc_unstable())
goto out;

r = vcpu_tsc_khz(vcpu);

r = vcpu->arch.virtual_tsc_khz;
goto out;
}
default:
Expand Down Expand Up @@ -5312,6 +5312,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
profile_hit(KVM_PROFILING, (void *)rip);
}

if (unlikely(vcpu->arch.tsc_always_catchup))
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

kvm_lapic_sync_from_vapic(vcpu);

Expand Down Expand Up @@ -6004,7 +6006,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
}
vcpu->arch.pio_data = page_address(page);

kvm_init_tsc_catchup(vcpu, max_tsc_khz);
kvm_set_tsc_khz(vcpu, max_tsc_khz);

r = kvm_mmu_create(vcpu);
if (r < 0)
Expand Down

0 comments on commit 5d6310a

Please sign in to comment.