From 4f35dd70fada49baa358372a4dfe907cd472f6fb Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 19 Jul 2007 21:28:35 +0200
Subject: [PATCH]

--- yaml ---
r: 61771
b: refs/heads/master
c: e436d80085133858bf2613a630365e8a0459fd58
h: refs/heads/master
i:
  61769: c9d320c6c297bb22f22ec28dc667af217b9709f5
  61767: ce25915343b1e66593ff69d1f27ecbec70c83ee8
v: v3
---
 [refs]                      |  2 +-
 trunk/include/linux/sched.h |  7 +++++++
 trunk/kernel/sched.c        | 17 +++++++++++++++++
 3 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 0475ae790500..c9ebd1fd3e77 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 969bb4e4032dac67287951d8f6642a3b5119694e
+refs/heads/master: e436d80085133858bf2613a630365e8a0459fd58
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 94f624aef017..33b9b4841ee7 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index a35a92ff38fd..93cf241cfbe9 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
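
A minimal usage sketch, not part of the patch itself: cpu_clock() trades a
little accuracy for speed, so it suits timestamping in code such as tracing
or instrumentation that can tolerate slight cross-CPU skew. The caller below
is hypothetical; example_timed_section() and do_some_work() are illustrative
names only, and the sketch assumes the interface exactly as declared above.

	/*
	 * Hypothetical caller sketch: time a short code section with the
	 * new per-cpu clock. get_cpu() disables preemption so the task
	 * cannot migrate between the two cpu_clock() reads; the result is
	 * in nanoseconds, since cpu_clock() derives from sched_clock().
	 */
	static void example_timed_section(void)
	{
		unsigned long long t0, t1;
		int cpu = get_cpu();	/* disable preemption, get CPU id */

		t0 = cpu_clock(cpu);
		do_some_work();		/* placeholder for the timed code */
		t1 = cpu_clock(cpu);
		put_cpu();

		printk(KERN_DEBUG "section took %llu ns\n", t1 - t0);
	}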