Commit 8ee51b2
---
r: 257287
b: refs/heads/master
c: 0d7b854
h: refs/heads/master
i:
  257285: a72a351
  257283: 3d7ba15
  257279: 0a5d2c5
v: v3
Andy Lutomirski authored and Ingo Molnar committed Jun 5, 2011
1 parent 140084d commit 8ee51b2
Showing 4 changed files with 23 additions and 69 deletions.
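Taken together, the diff below removes the kernel.vsyscall64 sysctl and the sysctl_enabled flag that gated the vDSO/vsyscall time fast paths: after this change, the fast path is taken whenever the clocksource provides a vread() routine, with a transparent syscall fallback otherwise. For orientation, here is a minimal user-space test program (hypothetical, not part of this commit) that exercises the affected entry points:

/* Hypothetical demo, assuming a Linux/x86-64 host: both calls below are
 * served from the vDSO when the current clocksource has a userspace-readable
 * vread(), and fall back to real syscalls transparently otherwise. */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* __vdso_clock_gettime() */
	gettimeofday(&tv, NULL);		/* __vdso_gettimeofday() */

	printf("monotonic: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	printf("realtime:  %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}

On glibc versions of that era, clock_gettime() additionally required linking with -lrt.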
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9fd67b4ed0714ab718f1f9bd14c344af336a6df7
+refs/heads/master: 0d7b8547fb67d5c2a7d954c56b3715b0e708be4a
1 change: 0 additions & 1 deletion trunk/arch/x86/include/asm/vgtod.h
@@ -11,7 +11,6 @@ struct vsyscall_gtod_data {
 	time_t		wall_time_sec;
 	u32		wall_time_nsec;

-	int		sysctl_enabled;
 	struct timezone sys_tz;
 	struct { /* extract of a clocksource struct */
 		cycle_t (*vread)(void);
34 changes: 1 addition & 33 deletions trunk/arch/x86/kernel/vsyscall_64.c
@@ -53,7 +53,6 @@ DEFINE_VVAR(int, vgetcpu_mode);
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-	.sysctl_enabled = 1,
 };

 void update_vsyscall_tz(void)
@@ -103,15 +102,6 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 	return ret;
 }

-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		: "=a" (secs)
-		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-	return secs;
-}
-
 static __always_inline void do_vgettimeofday(struct timeval * tv)
 {
 	cycle_t now, base, mask, cycle_delta;
@@ -122,8 +112,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
 	seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);

 	vread = VVAR(vsyscall_gtod_data).clock.vread;
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
-		     !vread)) {
+	if (unlikely(!vread)) {
 		gettimeofday(tv,NULL);
 		return;
 	}
@@ -165,8 +154,6 @@ time_t __vsyscall(1) vtime(time_t *t)
 {
 	unsigned seq;
 	time_t result;
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);

 	do {
 		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
@@ -227,22 +214,6 @@ static long __vsyscall(3) venosys_1(void)
 	return -ENOSYS;
 }

-#ifdef CONFIG_SYSCTL
-static ctl_table kernel_table2[] = {
-	{ .procname = "vsyscall64",
-	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
-	  .mode = 0644,
-	  .proc_handler = proc_dointvec },
-	{}
-};
-
-static ctl_table kernel_root_table2[] = {
-	{ .procname = "kernel", .mode = 0555,
-	  .child = kernel_table2 },
-	{}
-};
-#endif
-
 /* Assume __initcall executes before all user space. Hopefully kmod
    doesn't violate that. We'll find out if it does. */
 static void __cpuinit vsyscall_set_cpu(int cpu)
@@ -301,9 +272,6 @@ static int __init vsyscall_init(void)
 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-#ifdef CONFIG_SYSCTL
-	register_sysctl_table(kernel_root_table2);
-#endif
 	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	/* notifier priority > KVM */
 	hotcpu_notifier(cpu_vsyscall_notifier, 30);
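Note that vtime() and do_vgettimeofday() above still rely on the seqlock reader loop; only the sysctl escape hatch is gone. As a rough user-space sketch of that reader pattern in C11 atomics (illustrative only; the kernel's read_seqbegin()/read_seqretry() are the real interface, and every name below is hypothetical):

#include <stdatomic.h>
#include <time.h>

/* Hypothetical state maintained by a single writer thread. */
static _Atomic unsigned seq;	/* even = stable, odd = write in progress */
static _Atomic time_t wall_sec;	/* datum guarded by the sequence counter */

static time_t seqlock_read(void)
{
	unsigned s1, s2;
	time_t sec;

	do {
		/* Spin past in-progress writes (odd counter values). */
		do {
			s1 = atomic_load_explicit(&seq, memory_order_acquire);
		} while (s1 & 1);

		sec = atomic_load_explicit(&wall_sec, memory_order_relaxed);

		/* If the counter moved, a writer raced us; retry. */
		atomic_thread_fence(memory_order_acquire);
		s2 = atomic_load_explicit(&seq, memory_order_relaxed);
	} while (s1 != s2);

	return sec;
}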
55 changes: 21 additions & 34 deletions trunk/arch/x86/vdso/vclock_gettime.c
@@ -116,21 +116,21 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)

 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
-	if (likely(gtod->sysctl_enabled))
-		switch (clock) {
-		case CLOCK_REALTIME:
-			if (likely(gtod->clock.vread))
-				return do_realtime(ts);
-			break;
-		case CLOCK_MONOTONIC:
-			if (likely(gtod->clock.vread))
-				return do_monotonic(ts);
-			break;
-		case CLOCK_REALTIME_COARSE:
-			return do_realtime_coarse(ts);
-		case CLOCK_MONOTONIC_COARSE:
-			return do_monotonic_coarse(ts);
-		}
+	switch (clock) {
+	case CLOCK_REALTIME:
+		if (likely(gtod->clock.vread))
+			return do_realtime(ts);
+		break;
+	case CLOCK_MONOTONIC:
+		if (likely(gtod->clock.vread))
+			return do_monotonic(ts);
+		break;
+	case CLOCK_REALTIME_COARSE:
+		return do_realtime_coarse(ts);
+	case CLOCK_MONOTONIC_COARSE:
+		return do_monotonic_coarse(ts);
+	}
+
 	return vdso_fallback_gettime(clock, ts);
 }
 int clock_gettime(clockid_t, struct timespec *)
@@ -139,7 +139,7 @@ int clock_gettime(clockid_t, struct timespec *)
 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
-	if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
+	if (likely(gtod->clock.vread)) {
 		if (likely(tv != NULL)) {
 			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
 				     offsetof(struct timespec, tv_nsec) ||
@@ -161,27 +161,14 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 int gettimeofday(struct timeval *, struct timezone *)
 	__attribute__((weak, alias("__vdso_gettimeofday")));

-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		: "=a" (secs)
-		: "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
-	return secs;
-}
-
+/*
+ * This will break when the xtime seconds get inaccurate, but that is
+ * unlikely
+ */
 notrace time_t __vdso_time(time_t *t)
 {
-	time_t result;
-
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
-
 	/* This is atomic on x86_64 so we don't need any locks. */
-	result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

 	if (t)
 		*t = result;
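The slimmed-down __vdso_time() above works because an aligned 64-bit load is atomic on x86-64, so a single ACCESS_ONCE() read of wall_time_sec needs no seqlock at all. A portable C11 sketch of the same idiom (hypothetical names; not kernel code):

#include <stdatomic.h>
#include <time.h>

/* Hypothetical seconds counter, updated elsewhere by a timekeeping writer. */
static _Atomic time_t wall_time_sec;

time_t sketch_vdso_time(time_t *t)
{
	/* One relaxed load of a single aligned word stands in for
	 * ACCESS_ONCE(): no lock or retry loop is needed to read a
	 * consistent value. */
	time_t result = atomic_load_explicit(&wall_time_sec,
					     memory_order_relaxed);

	if (t)
		*t = result;
	return result;
}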
