diff --git a/[refs] b/[refs]
index 14f607cbc869..40ab51efe91b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2d42244ae71d6c7b0884b5664cf2eda30fb2ae68
+refs/heads/master: d82f0b0f6f1a0a25afc288fb7135b1601fe6df18
diff --git a/trunk/drivers/clocksource/acpi_pm.c b/trunk/drivers/clocksource/acpi_pm.c
index 3df338481004..5ca1d80de182 100644
--- a/trunk/drivers/clocksource/acpi_pm.c
+++ b/trunk/drivers/clocksource/acpi_pm.c
@@ -226,12 +226,9 @@ static int __init parse_pmtmr(char *arg)
 	if (strict_strtoul(arg, 16, &base))
 		return -EINVAL;
-#ifdef CONFIG_X86_64
-	if (base > UINT_MAX)
-		return -ERANGE;
-#endif
+
 	printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
-	       pmtmr_ioport, base);
+	       (unsigned int)pmtmr_ioport, base);
 	pmtmr_ioport = base;
 
 	return 1;
diff --git a/trunk/include/linux/clocksource.h b/trunk/include/linux/clocksource.h
index f88d32f8ff7c..55e434feec99 100644
--- a/trunk/include/linux/clocksource.h
+++ b/trunk/include/linux/clocksource.h
@@ -45,8 +45,7 @@ struct clocksource;
  * @read:		returns a cycle value
  * @mask:		bitmask for two's complement
  *			subtraction of non 64 bit counters
- * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
- * @mult_orig:		cycle to nanosecond multiplier (unadjusted by NTP)
+ * @mult:		cycle to nanosecond multiplier
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
@@ -64,7 +63,6 @@ struct clocksource {
 	cycle_t (*read)(void);
 	cycle_t mask;
 	u32 mult;
-	u32 mult_orig;
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
@@ -79,7 +77,6 @@ struct clocksource {
 	/* timekeeping specific data, ignore */
 	cycle_t cycle_interval;
 	u64 xtime_interval;
-	u32 raw_interval;
 	/*
 	 * Second part is written at each timer interrupt
 	 * Keep it in a different cache line to dirty no
@@ -88,7 +85,6 @@ struct clocksource {
 	cycle_t cycle_last ____cacheline_aligned_in_smp;
 	u64 xtime_nsec;
 	s64 error;
-	struct timespec raw_time;
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 	/* Watchdog related data, used by the framework */
@@ -205,19 +201,17 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 					unsigned long length_nsec)
 {
 	u64 tmp;
 
-	/* Do the ns -> cycle conversion first, using original mult */
+	/* XXX - All of this could use a whole lot of optimization */
 	tmp = length_nsec;
 	tmp <<= c->shift;
-	tmp += c->mult_orig/2;
-	do_div(tmp, c->mult_orig);
+	tmp += c->mult/2;
+	do_div(tmp, c->mult);
 
 	c->cycle_interval = (cycle_t)tmp;
 	if (c->cycle_interval == 0)
 		c->cycle_interval = 1;
 
-	/* Go back from cycles -> shifted ns, this time use ntp adjused mult */
 	c->xtime_interval = (u64)c->cycle_interval * c->mult;
-	c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
 }
diff --git a/trunk/include/linux/time.h b/trunk/include/linux/time.h
index 205f974b9ebf..e15206a7e82e 100644
--- a/trunk/include/linux/time.h
+++ b/trunk/include/linux/time.h
@@ -117,7 +117,6 @@ extern int do_setitimer(int which, struct itimerval *value,
 extern unsigned int alarm_setitimer(unsigned int seconds);
 extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
-extern void getrawmonotonic(struct timespec *ts);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
 
@@ -215,7 +214,6 @@ struct itimerval {
 #define CLOCK_MONOTONIC			1
 #define CLOCK_PROCESS_CPUTIME_ID	2
 #define CLOCK_THREAD_CPUTIME_ID		3
-#define CLOCK_MONOTONIC_RAW		4
 
 /*
  * The IDs of various hardware clocks:
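Note: the clocksource.h hunk above drops the NTP-adjusted/raw split and goes back to a single mult. The conversion behind clocksource_calculate_interval() is the usual mult/shift scaling; the standalone sketch below illustrates that arithmetic with hypothetical helper names and sample values (a 1 MHz counter), not kernel code.

/*
 * Standalone sketch of the mult/shift arithmetic used by the
 * clocksource code above. Helper names and values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

/* ns = (cycles * mult) >> shift -- the cyc2ns() direction */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

/* inverse with rounding, as in clocksource_calculate_interval() */
static uint64_t ns_to_cycles(uint64_t nsec, uint32_t mult, uint32_t shift)
{
	uint64_t tmp = (nsec << shift) + mult / 2;

	return tmp / mult;	/* stands in for do_div(tmp, mult) */
}

int main(void)
{
	/* hypothetical 1 MHz counter: 1 cycle = 1000 ns */
	uint32_t shift = 20;
	uint32_t mult = 1000u << shift;

	/* prints 1000000 ns and 1000 cycles */
	printf("%llu ns\n",
	       (unsigned long long)cycles_to_ns(1000, mult, shift));
	printf("%llu cycles\n",
	       (unsigned long long)ns_to_cycles(1000000, mult, shift));
	return 0;
}

The point of this representation is that the hot cycles-to-nanoseconds path needs only a multiply and a shift; the division is confined to interval setup.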
diff --git a/trunk/kernel/hrtimer.c b/trunk/kernel/hrtimer.c
index b8e4dce80a74..03ea1378c43b 100644
--- a/trunk/kernel/hrtimer.c
+++ b/trunk/kernel/hrtimer.c
@@ -1620,9 +1620,11 @@ static void migrate_hrtimers(int cpu)
 	new_base = &get_cpu_var(hrtimer_bases);
 
 	tick_cancel_sched_timer(cpu);
-
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1631,8 +1633,7 @@ static void migrate_hrtimers(int cpu)
 	}
 
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/trunk/kernel/posix-timers.c b/trunk/kernel/posix-timers.c
index d3c66b53dff6..e36d5798cbff 100644
--- a/trunk/kernel/posix-timers.c
+++ b/trunk/kernel/posix-timers.c
@@ -222,15 +222,6 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 	return 0;
 }
 
-/*
- * Get monotonic time for posix timers
- */
-static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
-{
-	getrawmonotonic(tp);
-	return 0;
-}
-
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
@@ -244,15 +235,9 @@ static __init int init_posix_timers(void)
 		.clock_get = posix_ktime_get_ts,
 		.clock_set = do_posix_clock_nosettime,
 	};
-	struct k_clock clock_monotonic_raw = {
-		.clock_getres = hrtimer_get_res,
-		.clock_get = posix_get_monotonic_raw,
-		.clock_set = do_posix_clock_nosettime,
-	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
 	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
-	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
 
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
 					sizeof (struct k_itimer), 0, SLAB_PANIC,
diff --git a/trunk/kernel/time/clocksource.c b/trunk/kernel/time/clocksource.c
index 9ed2eec97526..093d4acf993b 100644
--- a/trunk/kernel/time/clocksource.c
+++ b/trunk/kernel/time/clocksource.c
@@ -325,9 +325,6 @@ int clocksource_register(struct clocksource *c)
 	unsigned long flags;
 	int ret;
 
-	/* save mult_orig on registration */
-	c->mult_orig = c->mult;
-
 	spin_lock_irqsave(&clocksource_lock, flags);
 	ret = clocksource_enqueue(c);
 	if (!ret)
diff --git a/trunk/kernel/time/jiffies.c b/trunk/kernel/time/jiffies.c
index 1ca99557e929..4c256fdb8875 100644
--- a/trunk/kernel/time/jiffies.c
+++ b/trunk/kernel/time/jiffies.c
@@ -61,7 +61,6 @@ struct clocksource clocksource_jiffies = {
 	.read		= jiffies_read,
 	.mask		= 0xffffffff, /*32bits*/
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
-	.mult_orig	= NSEC_PER_JIFFY << JIFFIES_SHIFT,
 	.shift		= JIFFIES_SHIFT,
 };
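Note: in the migrate_hrtimers() hunk above, local_irq_disable() plus spin_lock() collapse into spin_lock_irq(), and the new comment records why taking both base locks cannot deadlock: the caller is globally serialized and the nesting order is fixed. A rough userspace analogy of that move-everything-under-two-locks pattern, with illustrative names rather than kernel API:

/*
 * Userspace analogy of migrate_hrtimers()/migrate_timers() above:
 * take the destination lock first, then the source lock, move all
 * entries, release in reverse order. Illustrative only.
 */
#include <pthread.h>
#include <stddef.h>

struct entry {
	struct entry *next;
};

struct base {
	pthread_mutex_t lock;
	struct entry *head;
};

static void migrate(struct base *old_base, struct base *new_base)
{
	/*
	 * The kernel takes new_base->lock with spin_lock_irq() and
	 * old_base->lock with spin_lock_nested(); the fixed order plus
	 * the globally serialized caller is what rules out deadlock.
	 */
	pthread_mutex_lock(&new_base->lock);
	pthread_mutex_lock(&old_base->lock);

	while (old_base->head) {
		struct entry *e = old_base->head;

		old_base->head = e->next;
		e->next = new_base->head;
		new_base->head = e;
	}

	pthread_mutex_unlock(&old_base->lock);
	pthread_mutex_unlock(&new_base->lock);
}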
diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c
index 5099c95b8aa2..e91c29f961c9 100644
--- a/trunk/kernel/time/timekeeping.c
+++ b/trunk/kernel/time/timekeeping.c
@@ -58,26 +58,27 @@ struct clocksource *clock;
 
 #ifdef CONFIG_GENERIC_TIME
 /**
- * clocksource_forward_now - update clock to the current time
+ * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
 *
- * Forward the current clock to update its state since the last call to
- * update_wall_time(). This is useful before significant clock changes,
- * as it avoids having to deal with this time offset explicitly.
+ * private function, must hold xtime_lock lock when being
+ * called. Returns the number of nanoseconds since the
+ * last call to update_wall_time() (adjusted by NTP scaling)
 */
-static void clocksource_forward_now(void)
+static inline s64 __get_nsec_offset(void)
 {
 	cycle_t cycle_now, cycle_delta;
-	s64 nsec;
+	s64 ns_offset;
 
+	/* read clocksource: */
 	cycle_now = clocksource_read(clock);
+
+	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-	clock->cycle_last = cycle_now;
 
-	nsec = cyc2ns(clock, cycle_delta);
-	timespec_add_ns(&xtime, nsec);
+	/* convert to nanoseconds: */
+	ns_offset = cyc2ns(clock, cycle_delta);
 
-	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
-	clock->raw_time.tv_nsec += nsec;
+	return ns_offset;
 }
 
 /**
@@ -88,7 +89,6 @@ static void clocksource_forward_now(void)
 */
 void getnstimeofday(struct timespec *ts)
 {
-	cycle_t cycle_now, cycle_delta;
 	unsigned long seq;
 	s64 nsecs;
 
@@ -96,15 +96,7 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&xtime_lock);
 
 		*ts = xtime;
-
-		/* read clocksource: */
-		cycle_now = clocksource_read(clock);
-
-		/* calculate the delta since the last update_wall_time: */
-		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-
-		/* convert to nanoseconds: */
-		nsecs = cyc2ns(clock, cycle_delta);
+		nsecs = __get_nsec_offset();
 
 	} while (read_seqretry(&xtime_lock, seq));
 
@@ -137,22 +129,22 @@
 */
 int do_settimeofday(struct timespec *tv)
 {
-	struct timespec ts_delta;
 	unsigned long flags;
+	time_t wtm_sec, sec = tv->tv_sec;
+	long wtm_nsec, nsec = tv->tv_nsec;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	clocksource_forward_now();
-
-	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
-	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
+	nsec -= __get_nsec_offset();
 
-	xtime = *tv;
+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
 
+	set_normalized_timespec(&xtime, sec, nsec);
+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 
 	update_xtime_cache(0);
 
 	clock->error = 0;
@@ -178,19 +170,22 @@ EXPORT_SYMBOL(do_settimeofday);
 static void change_clocksource(void)
 {
 	struct clocksource *new;
+	cycle_t now;
+	u64 nsec;
 
 	new = clocksource_get_next();
 
 	if (clock == new)
 		return;
 
-	clocksource_forward_now();
-
-	new->raw_time = clock->raw_time;
+	new->cycle_last = 0;
+	now = clocksource_read(new);
+	nsec = __get_nsec_offset();
+	timespec_add_ns(&xtime, nsec);
 
 	clock = new;
-	clock->cycle_last = 0;
-	clock->cycle_last = clocksource_read(new);
+	clock->cycle_last = now;
+
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -205,43 +200,10 @@ static void change_clocksource(void)
 	 */
 }
 #else
-static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
+static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
-/**
- * getrawmonotonic - Returns the raw monotonic time in a timespec
- * @ts: pointer to the timespec to be set
- *
- * Returns the raw monotonic time (completely un-modified by ntp)
- */
-void getrawmonotonic(struct timespec *ts)
-{
-	unsigned long seq;
-	s64 nsecs;
-	cycle_t cycle_now, cycle_delta;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-
-		/* read clocksource: */
-		cycle_now = clocksource_read(clock);
-
-		/* calculate the delta since the last update_wall_time: */
-		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
-
-		/* convert to nanoseconds: */
-		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
-
-		*ts = clock->raw_time;
-
-	} while (read_seqretry(&xtime_lock, seq));
-
-	timespec_add_ns(ts, nsecs);
-}
-EXPORT_SYMBOL(getrawmonotonic);
-
-
 /**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
@@ -303,6 +265,8 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
+/* xtime offset when we went into suspend */
+static s64 timekeeping_suspend_nsecs;
 
 /**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -328,6 +292,8 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
+	/* Make sure that we have the correct xtime reference */
+	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	clock->cycle_last = 0;
@@ -353,7 +319,8 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 	timekeeping_suspend_time = read_persistent_clock();
 
 	write_seqlock_irqsave(&xtime_lock, flags);
-	clocksource_forward_now();
+	/* Get the current xtime offset */
+	timekeeping_suspend_nsecs = __get_nsec_offset();
 	timekeeping_suspended = 1;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -494,22 +461,16 @@ void update_wall_time(void)
 	 */
 	while (offset >= clock->cycle_interval) {
 		/* accumulate one interval */
-		offset -= clock->cycle_interval;
+		clock->xtime_nsec += clock->xtime_interval;
 		clock->cycle_last += clock->cycle_interval;
+		offset -= clock->cycle_interval;
 
-		clock->xtime_nsec += clock->xtime_interval;
 		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
 			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 			xtime.tv_sec++;
 			second_overflow();
 		}
 
-		clock->raw_time.tv_nsec += clock->raw_interval;
-		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
-			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
-			clock->raw_time.tv_sec++;
-		}
-
 		/* accumulate error between NTP and clock interval */
 		clock->error += tick_length;
 		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
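Note: with __get_nsec_offset() restored above, getnstimeofday() samples xtime and the clocksource delta inside a seqlock read loop and retries if update_wall_time() wrote concurrently. A minimal userspace sketch of that retry protocol, assuming C11 atomics and glossing over the memory barriers of the kernel's real seqlock_t:

/*
 * Simplified seqlock sketch: readers never block, they retry if a
 * writer was active. Illustrative only; the kernel implementation
 * adds the memory barriers a correct seqlock needs.
 */
#include <stdatomic.h>
#include <time.h>

static atomic_uint seq;
static struct timespec shared_time;	/* stands in for xtime */

/* writer side: like write_seqlock()/write_sequnlock() */
static void write_time(const struct timespec *ts)
{
	atomic_fetch_add(&seq, 1);	/* odd: write in progress */
	shared_time = *ts;
	atomic_fetch_add(&seq, 1);	/* even again: write done */
}

/* reader side: like the read_seqbegin()/read_seqretry() loop */
static void read_time(struct timespec *ts)
{
	unsigned int start;

	do {
		do {
			start = atomic_load(&seq);
		} while (start & 1);	/* writer active, spin */
		*ts = shared_time;
	} while (atomic_load(&seq) != start);	/* raced a writer, retry */
}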
diff --git a/trunk/kernel/timer.c b/trunk/kernel/timer.c
index 03bc7f1f1593..e8019cc3418d 100644
--- a/trunk/kernel/timer.c
+++ b/trunk/kernel/timer.c
@@ -1435,9 +1435,11 @@ static void __cpuinit migrate_timers(int cpu)
 	BUG_ON(cpu_online(cpu));
 	old_base = per_cpu(tvec_bases, cpu);
 	new_base = get_cpu_var(tvec_bases);
-
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	BUG_ON(old_base->running_timer);
@@ -1452,8 +1454,7 @@ static void __cpuinit migrate_timers(int cpu)
 	}
 
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(tvec_bases);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
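Note: the update_wall_time() hunk in timekeeping.c above reorders the accumulation loop and drops the raw-time bookkeeping. Its core can be sketched standalone as below; types and names are illustrative, and the NTP error feedback and second_overflow() work are omitted.

/*
 * Sketch of interval accumulation: nanoseconds are kept shifted
 * left by 'shift' so steering can apply sub-nanosecond amounts.
 */
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct clk {
	uint64_t cycle_interval;	/* cycles per accumulation step */
	uint64_t xtime_interval;	/* cycle_interval * mult (shifted ns) */
	uint64_t xtime_nsec;		/* accumulated shifted nanoseconds */
	uint32_t shift;
};

/* consume 'offset' cycles in whole intervals; returns the remainder */
static uint64_t accumulate(struct clk *c, uint64_t offset, uint64_t *sec)
{
	while (offset >= c->cycle_interval) {
		/* same order as the patched loop above */
		c->xtime_nsec += c->xtime_interval;
		offset -= c->cycle_interval;

		if (c->xtime_nsec >= NSEC_PER_SEC << c->shift) {
			c->xtime_nsec -= NSEC_PER_SEC << c->shift;
			(*sec)++;	/* the kernel also runs second_overflow() here */
		}
	}
	return offset;	/* cycles too few to fill an interval */
}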