x86-64: Move vread_tsc into a new file with sensible options
vread_tsc is short and hot, and it's userspace code so the usual
reasons to enable -pg and turn off sibling calls don't apply.

(OK, turning off sibling calls has no effect.  But it might
someday...)

As an added benefit, tsc.c is profilable now.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Borislav Petkov <bp@amd64.org>
Link: http://lkml.kernel.org/r/%3C99c6d7f5efa3ccb65b4ac6eb443e1ab7bad47d7b.1306156808.git.luto%40mit.edu%3E
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Andy Lutomirski authored and Thomas Gleixner committed May 24, 2011
1 parent 0f51f28 commit 44259b1
Showing 4 changed files with 45 additions and 37 deletions.
4 changes: 4 additions & 0 deletions arch/x86/include/asm/tsc.h
@@ -51,6 +51,10 @@ extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern unsigned long native_calibrate_tsc(void);

#ifdef CONFIG_X86_64
extern cycles_t vread_tsc(void);
#endif

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
8 changes: 5 additions & 3 deletions arch/x86/kernel/Makefile
@@ -8,7 +8,6 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)

ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_pvclock.o = -pg
@@ -24,13 +23,16 @@ endif
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
CFLAGS_hpet.o := $(nostackp)
CFLAGS_tsc.o := $(nostackp)
CFLAGS_vread_tsc_64.o := $(nostackp)
CFLAGS_paravirt.o := $(nostackp)
GCOV_PROFILE_vsyscall_64.o := n
GCOV_PROFILE_hpet.o := n
GCOV_PROFILE_tsc.o := n
GCOV_PROFILE_paravirt.o := n

# vread_tsc_64 is hot and should be fully optimized:
CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls

obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
@@ -39,7 +41,7 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o vread_tsc_64.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
34 changes: 0 additions & 34 deletions arch/x86/kernel/tsc.c
@@ -763,40 +763,6 @@ static cycle_t read_tsc(struct clocksource *cs)
		ret : clocksource_tsc.cycle_last;
}

#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
#endif

static void resume_tsc(struct clocksource *cs)
{
	clocksource_tsc.cycle_last = 0;
36 changes: 36 additions & 0 deletions arch/x86/kernel/vread_tsc_64.c
@@ -0,0 +1,36 @@
/* This code runs in userspace. */

#define DISABLE_BRANCH_PROFILING
#include <asm/vgtod.h>

notrace cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
