Skip to content

Commit

Permalink
x86/asm/tsc: Rename native_read_tsc() to rdtsc()
Browse files Browse the repository at this point in the history
Now that there is no paravirt TSC, the "native" prefix is
inappropriate. The function executes the RDTSC instruction, so give it
the obvious name: rdtsc().

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/fd43e16281991f096c1e4d21574d9e1402c62d39.1434501121.git.luto@kernel.org
[ Ported it to v4.2-rc1. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
  • Loading branch information
Andy Lutomirski authored and Ingo Molnar committed Jul 6, 2015
1 parent fe47ae6 commit 4ea1636
Show file tree
Hide file tree
Showing 25 changed files with 59 additions and 50 deletions.
2 changes: 1 addition & 1 deletion arch/x86/boot/compressed/aslr.c
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ static unsigned long get_random_long(void)

if (has_cpuflag(X86_FEATURE_TSC)) {
debug_putstr(" RDTSC");
raw = native_read_tsc();
raw = rdtsc();

random ^= raw;
use_i8254 = false;
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/entry/vdso/vclock_gettime.c
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
* but no one has ever seen it happen.
*/
rdtsc_barrier();
ret = (cycle_t)native_read_tsc();
ret = (cycle_t)rdtsc();

last = gtod->cycle_last;

Expand Down
11 changes: 10 additions & 1 deletion arch/x86/include/asm/msr.h
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

static __always_inline unsigned long long native_read_tsc(void)
/**
* rdtsc() - returns the current TSC without ordering constraints
*
* rdtsc() returns the result of RDTSC as a 64-bit integer. The
* only ordering constraint it supplies is the ordering implied by
* "asm volatile": it will put the RDTSC in the place you expect. The
* CPU can and will speculatively execute that RDTSC, though, so the
* results can be non-monotonic if compared on different CPUs.
*/
static __always_inline unsigned long long rdtsc(void)
{
DECLARE_ARGS(val, low, high);

Expand Down
2 changes: 1 addition & 1 deletion arch/x86/include/asm/pvclock.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
static __always_inline
u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
{
u64 delta = native_read_tsc() - src->tsc_timestamp;
u64 delta = rdtsc() - src->tsc_timestamp;
return pvclock_scale_delta(delta, src->tsc_to_system_mul,
src->tsc_shift);
}
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/include/asm/stackprotector.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
* on during the bootup the random pool has true entropy too.
*/
get_random_bytes(&canary, sizeof(canary));
tsc = native_read_tsc();
tsc = rdtsc();
canary += tsc + (tsc << 32UL);

current->stack_canary = canary;
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/include/asm/tsc.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ static inline cycles_t get_cycles(void)
return 0;
#endif

return native_read_tsc();
return rdtsc();
}

extern void tsc_init(void);
Expand Down
8 changes: 4 additions & 4 deletions arch/x86/kernel/apb_timer.c
Original file line number Diff line number Diff line change
Expand Up @@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)

/* Verify whether apbt counter works */
t1 = dw_apb_clocksource_read(clocksource_apbt);
start = native_read_tsc();
start = rdtsc();

/*
* We don't know the TSC frequency yet, but waiting for
Expand All @@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
*/
do {
rep_nop();
now = native_read_tsc();
now = rdtsc();
} while ((now - start) < 200000UL);

/* APBT is the only always on clocksource, it has to work! */
Expand Down Expand Up @@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
old = dw_apb_clocksource_read(clocksource_apbt);
old += loop;

t1 = native_read_tsc();
t1 = rdtsc();

do {
new = dw_apb_clocksource_read(clocksource_apbt);
} while (new < old);

t2 = native_read_tsc();
t2 = rdtsc();

shift = 5;
if (unlikely(loop >> shift == 0)) {
Expand Down
8 changes: 4 additions & 4 deletions arch/x86/kernel/apic/apic.c
Original file line number Diff line number Diff line change
Expand Up @@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
{
u64 tsc;

tsc = native_read_tsc();
tsc = rdtsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
}
Expand Down Expand Up @@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
unsigned long pm = acpi_pm_read_early();

if (cpu_has_tsc)
tsc = native_read_tsc();
tsc = rdtsc();

switch (lapic_cal_loops++) {
case 0:
Expand Down Expand Up @@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
long long max_loops = cpu_khz ? cpu_khz : 1000000;

if (cpu_has_tsc)
tsc = native_read_tsc();
tsc = rdtsc();

if (disable_apic) {
disable_ioapic_support();
Expand Down Expand Up @@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
}
if (queued) {
if (cpu_has_tsc && cpu_khz) {
ntsc = native_read_tsc();
ntsc = rdtsc();
max_loops = (cpu_khz << 10) - (ntsc - tsc);
} else
max_loops--;
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/amd.c
Original file line number Diff line number Diff line change
Expand Up @@ -125,10 +125,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c)

n = K6_BUG_LOOP;
f_vide = vide;
d = native_read_tsc();
d = rdtsc();
while (n--)
f_vide();
d2 = native_read_tsc();
d2 = rdtsc();
d = d2-d;

if (d > 20*K6_BUG_LOOP)
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/kernel/cpu/mcheck/mce.c
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
m->tsc = native_read_tsc();
m->tsc = rdtsc();
/* We hope get_seconds stays lockless */
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
Expand Down Expand Up @@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
{
unsigned long *cpu_tsc = (unsigned long *)data;

cpu_tsc[smp_processor_id()] = native_read_tsc();
cpu_tsc[smp_processor_id()] = rdtsc();
}

static int mce_apei_read_done;
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kernel/espfix_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ static void init_espfix_random(void)
*/
if (!arch_get_random_long(&rand)) {
/* The constant is an arbitrary large prime */
rand = native_read_tsc();
rand = rdtsc();
rand *= 0xc345c6b72fd16123UL;
}

Expand Down
4 changes: 2 additions & 2 deletions arch/x86/kernel/hpet.c
Original file line number Diff line number Diff line change
Expand Up @@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)

/* Verify whether hpet counter works */
t1 = hpet_readl(HPET_COUNTER);
start = native_read_tsc();
start = rdtsc();

/*
* We don't know the TSC frequency yet, but waiting for
Expand All @@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
*/
do {
rep_nop();
now = native_read_tsc();
now = rdtsc();
} while ((now - start) < 200000UL);

if (t1 == hpet_readl(HPET_COUNTER)) {
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/kernel/trace_clock.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
u64 ret;

rdtsc_barrier();
ret = native_read_tsc();
ret = rdtsc();

return ret;
}
4 changes: 2 additions & 2 deletions arch/x86/kernel/tsc.c
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)

data = cyc2ns_write_begin(cpu);

tsc_now = native_read_tsc();
tsc_now = rdtsc();
ns_now = cycles_2_ns(tsc_now);

/*
Expand Down Expand Up @@ -290,7 +290,7 @@ u64 native_sched_clock(void)
}

/* read the Time Stamp Counter: */
tsc_now = native_read_tsc();
tsc_now = rdtsc();

/* return the value in ns */
return cycles_2_ns(tsc_now);
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/kvm/lapic.c
Original file line number Diff line number Diff line change
Expand Up @@ -1172,7 +1172,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)

tsc_deadline = apic->lapic_timer.expired_tscdeadline;
apic->lapic_timer.expired_tscdeadline = 0;
guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
Expand Down Expand Up @@ -1240,7 +1240,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
local_irq_save(flags);

now = apic->lapic_timer.timer.base->get_time();
guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
if (likely(tscdeadline > guest_tsc)) {
ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/kvm/svm.c
Original file line number Diff line number Diff line change
Expand Up @@ -1080,7 +1080,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
u64 tsc;

tsc = svm_scale_tsc(vcpu, native_read_tsc());
tsc = svm_scale_tsc(vcpu, rdtsc());

return target_tsc - tsc;
}
Expand Down Expand Up @@ -3079,7 +3079,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
switch (msr_info->index) {
case MSR_IA32_TSC: {
msr_info->data = svm->vmcb->control.tsc_offset +
svm_scale_tsc(vcpu, native_read_tsc());
svm_scale_tsc(vcpu, rdtsc());

break;
}
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/kvm/vmx.c
Original file line number Diff line number Diff line change
Expand Up @@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
{
u64 host_tsc, tsc_offset;

host_tsc = native_read_tsc();
host_tsc = rdtsc();
tsc_offset = vmcs_read64(TSC_OFFSET);
return host_tsc + tsc_offset;
}
Expand Down Expand Up @@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho

static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
return target_tsc - native_read_tsc();
return target_tsc - rdtsc();
}

static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
Expand Down
12 changes: 6 additions & 6 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -1455,7 +1455,7 @@ static cycle_t read_tsc(void)
* but no one has ever seen it happen.
*/
rdtsc_barrier();
ret = (cycle_t)native_read_tsc();
ret = (cycle_t)rdtsc();

last = pvclock_gtod_data.clock.cycle_last;

Expand Down Expand Up @@ -1646,7 +1646,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
host_tsc = rdtsc();
kernel_ns = get_kernel_ns();
}

Expand Down Expand Up @@ -2810,7 +2810,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
native_read_tsc() - vcpu->arch.last_host_tsc;
rdtsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (check_tsc_unstable()) {
Expand Down Expand Up @@ -2838,7 +2838,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = native_read_tsc();
vcpu->arch.last_host_tsc = rdtsc();
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
Expand Down Expand Up @@ -6623,7 +6623,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
hw_breakpoint_restore();

vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
native_read_tsc());
rdtsc());

vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
Expand Down Expand Up @@ -7437,7 +7437,7 @@ int kvm_arch_hardware_enable(void)
if (ret != 0)
return ret;

local_tsc = native_read_tsc();
local_tsc = rdtsc();
stable = !check_tsc_unstable();
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
Expand Down
8 changes: 4 additions & 4 deletions arch/x86/lib/delay.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,10 @@ static void delay_tsc(unsigned long __loops)
preempt_disable();
cpu = smp_processor_id();
rdtsc_barrier();
bclock = native_read_tsc();
bclock = rdtsc();
for (;;) {
rdtsc_barrier();
now = native_read_tsc();
now = rdtsc();
if ((now - bclock) >= loops)
break;

Expand All @@ -80,7 +80,7 @@ static void delay_tsc(unsigned long __loops)
loops -= (now - bclock);
cpu = smp_processor_id();
rdtsc_barrier();
bclock = native_read_tsc();
bclock = rdtsc();
}
}
preempt_enable();
Expand All @@ -100,7 +100,7 @@ void use_tsc_delay(void)
int read_current_timer(unsigned long *timer_val)
{
if (delay_fn == delay_tsc) {
*timer_val = native_read_tsc();
*timer_val = rdtsc();
return 0;
}
return -1;
Expand Down
2 changes: 1 addition & 1 deletion drivers/cpufreq/intel_pstate.c
Original file line number Diff line number Diff line change
Expand Up @@ -765,7 +765,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
tsc = native_read_tsc();
tsc = rdtsc();
local_irq_restore(flags);

cpu->last_sample_time = cpu->sample.time;
Expand Down
4 changes: 2 additions & 2 deletions drivers/input/gameport/gameport.c
Original file line number Diff line number Diff line change
Expand Up @@ -149,9 +149,9 @@ static int old_gameport_measure_speed(struct gameport *gameport)

for(i = 0; i < 50; i++) {
local_irq_save(flags);
t1 = native_read_tsc();
t1 = rdtsc();
for (t = 0; t < 50; t++) gameport_read(gameport);
t2 = native_read_tsc();
t2 = rdtsc();
local_irq_restore(flags);
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
Expand Down
4 changes: 2 additions & 2 deletions drivers/input/joystick/analog.c
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ struct analog_port {

#include <linux/i8253.h>

#define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)native_read_tsc(); else x = get_time_pit(); } while (0)
#define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
static unsigned int get_time_pit(void)
Expand All @@ -160,7 +160,7 @@ static unsigned int get_time_pit(void)
return count;
}
#elif defined(__x86_64__)
#define GET_TIME(x) do { x = (unsigned int)native_read_tsc(); } while (0)
#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
Expand Down
Loading

0 comments on commit 4ea1636

Please sign in to comment.