Commit

---
yaml
---
r: 100016
b: refs/heads/master
c: 0ef9553
h: refs/heads/master
v: v3
Alok Kataria authored and Ingo Molnar committed Jul 9, 2008
1 parent 529127b commit 6a47d0c
Showing 6 changed files with 101 additions and 145 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 746f2eb790e75676ddc3b816ba18bac4179cc744
refs/heads/master: 0ef95533326a7b37d16025af9edc0c18e644b346
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o
obj-y += tsc_$(BITS).o io_delay.o rtc.o
obj-y += tsc_$(BITS).o io_delay.o rtc.o tsc.o

obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o
3 changes: 0 additions & 3 deletions trunk/arch/x86/kernel/time_32.c
@@ -39,9 +39,6 @@

#include "do_timer.h"

unsigned int cpu_khz; /* Detected as we calibrate the TSC */
EXPORT_SYMBOL(cpu_khz);

int timer_ack;

unsigned long profile_pc(struct pt_regs *regs)
86 changes: 86 additions & 0 deletions trunk/arch/x86/kernel/tsc.c
@@ -0,0 +1,86 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>

unsigned int cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
int tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
int tsc_disabled = -1;

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	u64 this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achive it. )
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			"cannot disable TSC completely.\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
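
The #else branch above works because the generic kernel provides sched_clock() as a weak default; defining a strong alias here makes sched_clock() and native_sched_clock() the same symbol, with no wrapper-call overhead. A minimal standalone sketch of that GCC alias mechanism (illustrative only: the names below are made up, and in the real kernel the weak default lives in a separate translation unit):

/* alias_demo.c - illustrative only, not kernel code; build with gcc */
#include <stdio.h>

static unsigned long long fast_clock_impl(void)
{
	return 42ULL;				/* stand-in for the rdtsc-based path */
}

/*
 * The public name is declared as a strong alias of the static
 * implementation; at link time a strong definition like this wins
 * over a weak default defined in another object file.
 */
unsigned long long demo_sched_clock(void)
	__attribute__((alias("fast_clock_impl")));

int main(void)
{
	printf("%llu\n", demo_sched_clock());	/* prints 42 */
	return 0;
}

Compiled with gcc, the call to demo_sched_clock() resolves directly to fast_clock_impl(); the patch uses the same attribute so the non-paravirt sched_clock() costs nothing extra over native_sched_clock().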
86 changes: 2 additions & 84 deletions trunk/arch/x86/kernel/tsc_32.c
@@ -15,52 +15,8 @@

#include "mach_timer.h"

/* native_sched_clock() is called before tsc_init(), so
we must start with the TSC soft disabled to prevent
erroneous rdtsc usage on !cpu_has_tsc processors */
static int tsc_disabled = -1;

/*
* On some systems the TSC frequency does not
* change with the cpu frequency. So we need
* an extra value to store the TSC freq
*/
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
"cannot disable TSC completely.\n");
tsc_disabled = 1;
return 1;
}
#else
/*
* disable flag for tsc. Takes effect by clearing the TSC cpu flag
* in cpu/common.c
*/
static int __init tsc_setup(char *str)
{
setup_clear_cpu_cap(X86_FEATURE_TSC);
return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
* code to mark and check if the TSC is unstable
* due to cpufreq or due to unsynced TSCs
*/
static int tsc_unstable;

int check_tsc_unstable(void)
{
return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
extern int tsc_unstable;
extern int tsc_disabled;

/* Accelerators for sched_clock()
* convert from cycles(64bits) => nanoseconds (64bits)
@@ -109,44 +65,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
local_irq_restore(flags);
}

/*
* Scheduler clock - returns current time in nanosec units.
*/
unsigned long long native_sched_clock(void)
{
unsigned long long this_offset;

/*
* Fall back to jiffies if there's no TSC available:
* ( But note that we still use it if the TSC is marked
* unstable. We do this because unlike Time Of Day,
* the scheduler clock tolerates small errors and it's
* very important for it to be as fast as the platform
* can achive it. )
*/
if (unlikely(tsc_disabled))
/* No locking but a rare wrong value is not a big deal: */
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

/* read the Time Stamp Counter: */
rdtscll(this_offset);

/* return the value in ns */
return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
unsigned long long start, end;
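
Both tsc_32.c and tsc_64.c keep their "Accelerators for sched_clock()" comment and set_cyc2ns_scale(): the cycles-to-nanoseconds conversion is done in fixed point, precomputing a scale so that ns = cycles * (10^6 / cpu_khz) becomes a multiply and a shift. A self-contained sketch of that math (illustrative only; the standalone wrappers are not kernel code, and a 10-bit scale shift is assumed here, matching the kernel's CYC2NS_SCALE_FACTOR of the time):

/* cyc2ns_demo.c - illustrative only; build with gcc */
#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10		/* 2^10, so the divide becomes a shift */

static unsigned long cyc2ns_scale;

static void set_cyc2ns_scale_demo(unsigned long cpu_khz)
{
	/* ns = cycles * (10^6 / cpu_khz)  =>  scale = (10^6 << 10) / cpu_khz */
	cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static uint64_t cycles_2_ns_demo(uint64_t cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	set_cyc2ns_scale_demo(2000000);		/* a 2 GHz CPU is 2,000,000 kHz */
	printf("%llu ns\n",
	       (unsigned long long)cycles_2_ns_demo(2000000000ULL));
	/* 2e9 cycles at 2 GHz ~= 1,000,000,000 ns, i.e. one second */
	return 0;
}

The kernel version additionally keeps the scale per CPU (DEFINE_PER_CPU(unsigned long, cyc2ns) in the hunks below) and recomputes it on cpufreq transitions; the demo only shows the core conversion.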
67 changes: 11 additions & 56 deletions trunk/arch/x86/kernel/tsc_64.c
@@ -13,12 +13,8 @@
#include <asm/timer.h>
#include <asm/vgtod.h>

static int notsc __initdata = 0;

unsigned int cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);
extern int tsc_unstable;
extern int tsc_disabled;

/* Accelerators for sched_clock()
* convert from cycles(64bits) => nanoseconds (64bits)
@@ -41,6 +37,7 @@ EXPORT_SYMBOL(tsc_khz);
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
@@ -63,41 +60,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
local_irq_restore(flags);
}

unsigned long long native_sched_clock(void)
{
unsigned long a = 0;

/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
* which means it is not completely exact and may not be monotonous
* between CPUs. But the errors should be too small to matter for
* scheduling purposes.
*/

rdtscll(a);
return cycles_2_ns(a);
}

/* We need to define a real function for sched_clock, to override the
weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif


static int tsc_unstable;

int check_tsc_unstable(void)
{
return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
@@ -281,14 +243,6 @@ __cpuinit int unsynchronized_tsc(void)
return num_present_cpus() > 1;
}

int __init notsc_setup(char *s)
{
notsc = 1;
return 1;
}

__setup("notsc", notsc_setup);

static struct clocksource clocksource_tsc;

/*
@@ -346,12 +300,13 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);

void __init init_tsc_clocksource(void)
{
if (!notsc) {
clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
clocksource_tsc.shift);
if (check_tsc_unstable())
clocksource_tsc.rating = 0;
if (tsc_disabled > 0)
return;

clocksource_register(&clocksource_tsc);
}
clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
clocksource_tsc.shift);
if (check_tsc_unstable())
clocksource_tsc.rating = 0;

clocksource_register(&clocksource_tsc);
}
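
After this patch a single tsc_disabled variable, shared through the externs above, carries the TSC state for both 32-bit and 64-bit code: it starts at -1 ("soft disabled" until the TSC has been validated), notsc_setup() sets it to 1 on CONFIG_X86_TSC kernels, native_sched_clock() falls back to jiffies for any non-zero value, and init_tsc_clocksource() now skips registration only when it is greater than zero. A small sketch of those two checks (illustrative only, not kernel code; the reset to 0 is assumed to happen in tsc_init(), which this diff does not show):

/* tsc_disabled_demo.c - illustrative only; build with gcc */
#include <stdio.h>

static int tsc_disabled = -1;	/* boot default, as in the new tsc.c */

static const char *sched_clock_source(void)
{
	/* native_sched_clock(): any non-zero value means "use jiffies" */
	return tsc_disabled ? "jiffies" : "tsc";
}

static int tsc_clocksource_registered(void)
{
	/* init_tsc_clocksource(): only an explicit disable (> 0) bails out */
	return !(tsc_disabled > 0);
}

int main(void)
{
	printf("early boot: clock=%s, registered=%d\n",
	       sched_clock_source(), tsc_clocksource_registered());

	tsc_disabled = 0;	/* assumed: done by tsc_init() once the TSC checks out */
	printf("after init: clock=%s, registered=%d\n",
	       sched_clock_source(), tsc_clocksource_registered());

	tsc_disabled = 1;	/* "notsc" on a CONFIG_X86_TSC kernel */
	printf("notsc:      clock=%s, registered=%d\n",
	       sched_clock_source(), tsc_clocksource_registered());
	return 0;
}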
