---
yaml
---
r: 224094
b: refs/heads/master
c: 08ec0c5
h: refs/heads/master
v: v3
Authored and committed by John Stultz on Dec 3, 2010
1 parent 1b283e1 commit b65537a
Showing 2 changed files with 84 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b0f969009f647cd473c5e559aeec9c4229d12f87
+refs/heads/master: 08ec0c58fb8a05d3191d5cb6f5d6f81adb419798
86 changes: 83 additions & 3 deletions trunk/arch/x86/kernel/tsc.c
@@ -888,7 +888,82 @@ __cpuinit int unsynchronized_tsc(void)
 	return 0;
 }
 
-static void __init init_tsc_clocksource(void)
+
+static void tsc_refine_calibration_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
+/**
+ * tsc_refine_calibration_work - Further refine tsc freq calibration
+ * @work: ignored.
+ *
+ * This function uses delayed work over a period of a
+ * second to further refine the TSC freq value. Since this is
+ * timer based, instead of loop based, we don't block the boot
+ * process while this longer calibration is done.
+ *
+ * If there are any calibration anomalies (too many SMIs, etc),
+ * or the refined calibration is off by more than 1% from the
+ * fast early calibration, we throw out the new calibration and
+ * use the early calibration.
+ */
+static void tsc_refine_calibration_work(struct work_struct *work)
+{
+	static u64 tsc_start = -1, ref_start;
+	static int hpet;
+	u64 tsc_stop, ref_stop, delta;
+	unsigned long freq;
+
+	/* Don't bother refining TSC on unstable systems */
+	if (check_tsc_unstable())
+		goto out;
+
+	/*
+	 * Since the work is started early in boot, we may be
+	 * delayed the first time we expire. So set the workqueue
+	 * again once we know timers are working.
+	 */
+	if (tsc_start == -1) {
+		/*
+		 * Only set hpet once, to avoid mixing hardware
+		 * if the hpet becomes enabled later.
+		 */
+		hpet = is_hpet_enabled();
+		schedule_delayed_work(&tsc_irqwork, HZ);
+		tsc_start = tsc_read_refs(&ref_start, hpet);
+		return;
+	}
+
+	tsc_stop = tsc_read_refs(&ref_stop, hpet);
+
+	/* hpet or pmtimer available ? */
+	if (!hpet && !ref_start && !ref_stop)
+		goto out;
+
+	/* Check whether the sampling was disturbed by an SMI */
+	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
+		goto out;
+
+	delta = tsc_stop - tsc_start;
+	delta *= 1000000LL;
+	if (hpet)
+		freq = calc_hpet_ref(delta, ref_start, ref_stop);
+	else
+		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
+
+	/* Make sure we're within 1% */
+	if (abs(tsc_khz - freq) > tsc_khz/100)
+		goto out;
+
+	tsc_khz = freq;
+	printk(KERN_INFO "Refined TSC clocksource calibration: "
+		"%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000,
+					(unsigned long)tsc_khz % 1000);
+
+out:
+	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+}
+
+
+static int __init init_tsc_clocksource(void)
 {
 	if (tsc_clocksource_reliable)
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
@@ -897,8 +972,14 @@ static void __init init_tsc_clocksource(void)
 		clocksource_tsc.rating = 0;
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
 	}
-	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+	schedule_delayed_work(&tsc_irqwork, 0);
+	return 0;
 }
+/*
+ * We use device_initcall here to ensure we run after the hpet
+ * is fully initialized, which may occur at fs_initcall time.
+ */
+device_initcall(init_tsc_clocksource);
 
 void __init tsc_init(void)
 {
@@ -952,6 +1033,5 @@ void __init tsc_init(void)
 	mark_tsc_unstable("TSCs unsynchronized");
 
 	check_system_tsc_reliable();
-	init_tsc_clocksource();
 }
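
For readers outside the kernel tree, the measure-wait-measure refinement in the hunk above can be sketched in userspace. Below is a minimal, illustrative analogue, not part of this commit: CLOCK_MONOTONIC stands in for the HPET/PM-timer reference, a plain sleep(1) stands in for the HZ-delayed workqueue, and the same microsecond scaling and 1% sanity gate are applied. The helper mono_ns() and the hardcoded early_khz are invented for the sketch; early_khz plays the role of the kernel's fast early calibration value (tsc_khz).

/* Illustrative userspace analogue of tsc_refine_calibration_work();
 * assumes x86 with an invariant TSC and gcc or clang.
 * Build: gcc -O2 tsc_refine.c -o tsc_refine
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <x86intrin.h>		/* __rdtsc() */

static uint64_t mono_ns(void)	/* reference clock, standing in for hpet/pmtimer */
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	/* Placeholder for the fast early calibration; substitute your
	 * machine's coarse estimate in kHz for a meaningful result. */
	uint64_t early_khz = 2793070;

	/* First sample: analogous to the first expiry of tsc_irqwork. */
	uint64_t ref_start = mono_ns();
	uint64_t tsc_start = __rdtsc();

	sleep(1);		/* the HZ-long delayed-work interval */

	/* Second sample: analogous to the re-armed expiry. */
	uint64_t ref_stop = mono_ns();
	uint64_t tsc_stop = __rdtsc();

	/* kHz = cycles * 1e6 / elapsed_ns; this mirrors the patch's
	 * delta *= 1000000LL followed by the calc_*_ref() division. */
	uint64_t freq = (tsc_stop - tsc_start) * 1000000ull
			/ (ref_stop - ref_start);

	/* The 1% sanity gate from the patch: keep the early value if
	 * the refined one is implausibly far off (SMIs, preemption). */
	uint64_t diff = freq > early_khz ? freq - early_khz
					 : early_khz - freq;
	if (diff > early_khz / 100) {
		fprintf(stderr, "refined calibration rejected (>1%% off)\n");
		return 1;
	}

	printf("Refined TSC calibration: %llu.%03llu MHz\n",
	       (unsigned long long)(freq / 1000),
	       (unsigned long long)(freq % 1000));
	return 0;
}

On success, the kernel-side printk above would report something along the lines of "Refined TSC clocksource calibration: 2793.070 MHz." in the boot log.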

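The device_initcall() comment in the second hunk is an ordering guarantee: initcall levels run in ascending order at boot, and fs_initcall (level 5) runs before device_initcall (level 6), so any HPET setup done at fs_initcall time has already happened when init_tsc_clocksource() is called. A rough userspace analogue uses GCC constructor priorities, which also fire in ascending order (the function names here are invented for illustration):

#include <stdio.h>

/* Lower priority runs first, like lower initcall levels;
 * priorities 0-100 are reserved by the toolchain. */
__attribute__((constructor(105)))	/* "fs_initcall": hpet setup */
static void fake_hpet_late_init(void)
{
	puts("level 5: hpet initialized");
}

__attribute__((constructor(106)))	/* "device_initcall": tsc work */
static void fake_init_tsc_clocksource(void)
{
	puts("level 6: tsc clocksource work scheduled");
}

int main(void)
{
	return 0;
}

Run, this prints the level-5 line before the level-6 line, mirroring why is_hpet_enabled() can be trusted by the time the TSC refinement work is scheduled.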