Skip to content

Commit

Permalink
sched: add rq_clock()/__rq_clock()
Browse files Browse the repository at this point in the history
add rq_clock()/__rq_clock(), a robust wrapper around sched_clock(),
used by CFS. It protects against common types of sched_clock() problems
(caused by hardware): time warping forwards and backwards.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
  • Loading branch information
Ingo Molnar committed Jul 9, 2007
1 parent 6aa645e commit 20d315d
Showing 1 changed file with 46 additions and 0 deletions.
46 changes: 46 additions & 0 deletions kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -388,6 +388,52 @@ static inline int cpu_of(struct rq *rq)
#endif
}

/*
 * Per-runqueue clock, as finegrained as the platform can give us.
 *
 * Robust wrapper around sched_clock(): hardware clocks can misbehave,
 * so a backward jump or an implausibly large forward jump is clamped
 * to a minimal one-unit step and accounted in the rq statistics.
 */
static unsigned long long __rq_clock(struct rq *rq)
{
	u64 last_raw = rq->prev_clock_raw;
	u64 raw_now = sched_clock();
	s64 delta = raw_now - last_raw;
	u64 new_clock = rq->clock;

	if (unlikely(delta < 0)) {
		/* sched_clock() went backwards: advance minimally */
		new_clock++;
		rq->clock_warps++;
	} else if (unlikely(delta > 2*TICK_NSEC)) {
		/* forward jump too large to be plausible: advance minimally */
		new_clock++;
		rq->clock_overflows++;
	} else {
		/* sane delta: track the largest one seen, then apply it */
		if (unlikely(delta > rq->clock_max_delta))
			rq->clock_max_delta = delta;
		new_clock += delta;
	}

	rq->prev_clock_raw = raw_now;
	rq->clock = new_clock;

	return new_clock;
}

/*
 * Read the per-runqueue clock: only the rq's own CPU advances the
 * clock via __rq_clock(); remote CPUs just get the last published
 * value.
 */
static inline unsigned long long rq_clock(struct rq *rq)
{
	if (smp_processor_id() != cpu_of(rq))
		return rq->clock;

	return __rq_clock(rq);
}

/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
* See detach_destroy_domains: synchronize_sched for details.
Expand Down

0 comments on commit 20d315d

Please sign in to comment.