Commit 8bb9f18

---
r: 100217
b: refs/heads/master
c: 76a2a6e
h: refs/heads/master
i:
  100215: a3605f5
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jun 27, 2008
1 parent 6b73b8e commit 8bb9f18
Showing 3 changed files with 13 additions and 77 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c09595f63bb1909c5dc4dca288f4fe818561b5f3
+refs/heads/master: 76a2a6ee8a0660a29127f05989ac59ae1ce865fa
76 changes: 0 additions & 76 deletions trunk/kernel/sched.c
@@ -818,82 +818,6 @@ static inline u64 global_rt_runtime(void)
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
-unsigned long long time_sync_thresh = 100000;
-
-static DEFINE_PER_CPU(unsigned long long, time_offset);
-static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
-
-/*
- * Global lock which we take every now and then to synchronize
- * the CPUs time. This method is not warp-safe, but it's good
- * enough to synchronize slowly diverging time sources and thus
- * it's good enough for tracing:
- */
-static DEFINE_SPINLOCK(time_sync_lock);
-static unsigned long long prev_global_time;
-
-static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
-{
-	/*
-	 * We want this inlined, to not get tracer function calls
-	 * in this critical section:
-	 */
-	spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
-	__raw_spin_lock(&time_sync_lock.raw_lock);
-
-	if (time < prev_global_time) {
-		per_cpu(time_offset, cpu) += prev_global_time - time;
-		time = prev_global_time;
-	} else {
-		prev_global_time = time;
-	}
-
-	__raw_spin_unlock(&time_sync_lock.raw_lock);
-	spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
-
-	return time;
-}
-
-static unsigned long long __cpu_clock(int cpu)
-{
-	unsigned long long now;
-
-	/*
-	 * Only call sched_clock() if the scheduler has already been
-	 * initialized (some code might call cpu_clock() very early):
-	 */
-	if (unlikely(!scheduler_running))
-		return 0;
-
-	now = sched_clock_cpu(cpu);
-
-	return now;
-}
-
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-unsigned long long cpu_clock(int cpu)
-{
-	unsigned long long prev_cpu_time, time, delta_time;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
-	time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
-	delta_time = time-prev_cpu_time;
-
-	if (unlikely(delta_time > time_sync_thresh)) {
-		time = __sync_cpu_clock(time, cpu);
-		per_cpu(prev_cpu_time, cpu) = time;
-	}
-	local_irq_restore(flags);
-
-	return time;
-}
-EXPORT_SYMBOL_GPL(cpu_clock);
-
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next) do { } while (0)
 #endif
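The block deleted above is the old cpu_clock(): sched_clock() plus a per-CPU offset that gets re-synchronized under a global lock whenever a CPU's reading has advanced by more than time_sync_thresh, so slowly diverging per-CPU clocks cannot drift visibly apart. As a reading aid only, here is a stand-alone user-space sketch of that synchronization idea; the names (synced_clock, local_clock, global_time, cpu_offset) are illustrative, not kernel APIs, and unlike this sketch the real code syncs only occasionally and does so with IRQs off under time_sync_lock.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint64_t global_time;		/* last value handed out on any CPU */
static uint64_t cpu_offset[NR_CPUS];	/* per-CPU correction, only grows */
static uint64_t raw_time[NR_CPUS];	/* stand-in for sched_clock_cpu() */

/* Simulate per-CPU clocks that drift: CPU 0 ticks faster than the rest. */
static uint64_t local_clock(int cpu)
{
	raw_time[cpu] += (cpu == 0) ? 150 : 100;
	return raw_time[cpu];
}

/* Core of the removed scheme: clamp a lagging CPU forward via its offset. */
static uint64_t synced_clock(int cpu)
{
	uint64_t t = local_clock(cpu) + cpu_offset[cpu];

	if (t < global_time) {
		cpu_offset[cpu] += global_time - t;	/* pull the laggard forward */
		t = global_time;
	} else {
		global_time = t;			/* remember the new maximum */
	}
	return t;
}

int main(void)
{
	for (int round = 0; round < 3; round++)
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("round %d, cpu%d: %llu\n", round, cpu,
			       (unsigned long long)synced_clock(cpu));
	return 0;
}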
12 changes: 12 additions & 0 deletions trunk/kernel/sched_clock.c
@@ -244,3 +244,15 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 {
 	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
+
+unsigned long long cpu_clock(int cpu)
+{
+	unsigned long long clock;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	clock = sched_clock_cpu(cpu);
+	raw_local_irq_restore(flags);
+
+	return clock;
+}
