Commit 29e73bb
Commit message (yaml):

r: 155476
b: refs/heads/master
c: 6ff7041
h: refs/heads/master
v: v3

Thomas Gleixner committed Jul 10, 2009
1 parent: 43825b7 · commit: 29e73bb
Showing 4 changed files with 65 additions and 78 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7e0c5086c172ecf8b0c2ad860b02a586967d17d0
+refs/heads/master: 6ff7041dbfeb3bd7dfe9aa67275c21199ef760d6
9 changes: 0 additions & 9 deletions trunk/include/linux/clockchips.h
@@ -143,12 +143,3 @@ extern void clockevents_notify(unsigned long reason, void *arg);
 #endif
 
 #endif
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-extern ktime_t clockevents_get_next_event(int cpu);
-#else
-static inline ktime_t clockevents_get_next_event(int cpu)
-{
-        return (ktime_t) { .tv64 = KTIME_MAX };
-}
-#endif
121 changes: 64 additions & 57 deletions trunk/kernel/hrtimer.c
@@ -191,6 +191,46 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
         }
 }
 
+
+/*
+ * Get the preferred target CPU for NOHZ
+ */
+static int hrtimer_get_target(int this_cpu, int pinned)
+{
+#ifdef CONFIG_NO_HZ
+        if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
+                int preferred_cpu = get_nohz_load_balancer();
+
+                if (preferred_cpu >= 0)
+                        return preferred_cpu;
+        }
+#endif
+        return this_cpu;
+}
+
+/*
+ * With HIGHRES=y we do not migrate the timer when it is expiring
+ * before the next event on the target cpu because we cannot reprogram
+ * the target cpu hardware and we would cause it to fire late.
+ *
+ * Called with cpu_base->lock of target cpu held.
+ */
+static int
+hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+        ktime_t expires;
+
+        if (!new_base->cpu_base->hres_active)
+                return 0;
+
+        expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+        return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
+#else
+        return 0;
+#endif
+}
+
 /*
  * Switch the timer base to the current CPU when possible.
  */
@@ -200,35 +240,16 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 {
         struct hrtimer_clock_base *new_base;
         struct hrtimer_cpu_base *new_cpu_base;
-        int cpu, preferred_cpu = -1;
-
-        cpu = smp_processor_id();
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
-        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
-                preferred_cpu = get_nohz_load_balancer();
-                if (preferred_cpu >= 0) {
-                        /*
-                         * We must not check the expiry value when
-                         * preferred_cpu is the current cpu. If base
-                         * != new_base we would loop forever when the
-                         * timer expires before the current programmed
-                         * next timer event.
-                         */
-                        if (preferred_cpu != cpu)
-                                cpu = preferred_cpu;
-                        else
-                                preferred_cpu = -1;
-                }
-        }
-#endif
+        int this_cpu = smp_processor_id();
+        int cpu = hrtimer_get_target(this_cpu, pinned);
 
 again:
         new_cpu_base = &per_cpu(hrtimer_bases, cpu);
         new_base = &new_cpu_base->clock_base[base->index];
 
         if (base != new_base) {
                 /*
-                 * We are trying to schedule the timer on the local CPU.
+                 * We are trying to move timer to new_base.
                  * However we can't change timer's base while it is running,
                  * so we keep it on the same CPU. No hassle vs. reprogramming
                  * the event source in the high resolution case. The softirq
@@ -244,38 +265,12 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
                 spin_unlock(&base->cpu_base->lock);
                 spin_lock(&new_base->cpu_base->lock);
 
-                /* Optimized away for NOHZ=n SMP=n */
-                if (cpu == preferred_cpu) {
-                        /* Calculate clock monotonic expiry time */
-#ifdef CONFIG_HIGH_RES_TIMERS
-                        ktime_t expires = ktime_sub(hrtimer_get_expires(timer),
-                                                        new_base->offset);
-#else
-                        ktime_t expires = hrtimer_get_expires(timer);
-#endif
-
-                        /*
-                         * Get the next event on target cpu from the
-                         * clock events layer.
-                         * This covers the highres=off nohz=on case as well.
-                         */
-                        ktime_t next = clockevents_get_next_event(cpu);
-
-                        ktime_t delta = ktime_sub(expires, next);
-
-                        /*
-                         * We do not migrate the timer when it is expiring
-                         * before the next event on the target cpu because
-                         * we cannot reprogram the target cpu hardware and
-                         * we would cause it to fire late.
-                         */
-                        if (delta.tv64 < 0) {
-                                cpu = smp_processor_id();
-                                spin_unlock(&new_base->cpu_base->lock);
-                                spin_lock(&base->cpu_base->lock);
-                                timer->base = base;
-                                goto again;
-                        }
+                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                        cpu = this_cpu;
+                        spin_unlock(&new_base->cpu_base->lock);
+                        spin_lock(&base->cpu_base->lock);
+                        timer->base = base;
+                        goto again;
                 }
                 timer->base = new_base;
         }
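
Note: the two hunks above interleave old and new code, so the post-patch shape of switch_hrtimer_base() is easier to see assembled in one place. The following is a reader's sketch only, not a quote of the file: the context GitHub collapses between the hunks (the running-timer bail-out and the lock handover) is paraphrased in a comment, and the signature and trailing return are reconstructed from the surrounding hunks.

/*
 * Sketch of switch_hrtimer_base() as it reads after this patch,
 * assembled from the hunks above. Collapsed diff context is
 * paraphrased in comments rather than reproduced verbatim.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
                    int pinned)
{
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;
        int this_cpu = smp_processor_id();
        int cpu = hrtimer_get_target(this_cpu, pinned);

again:
        new_cpu_base = &per_cpu(hrtimer_bases, cpu);
        new_base = &new_cpu_base->clock_base[base->index];

        if (base != new_base) {
                /*
                 * ... collapsed context: a running timer cannot change
                 * its base, so it stays put; otherwise base->cpu_base->lock
                 * is dropped and new_base->cpu_base->lock taken ...
                 */
                spin_unlock(&base->cpu_base->lock);
                spin_lock(&new_base->cpu_base->lock);

                /*
                 * Migrating would make the timer fire late if it expires
                 * before the next event already programmed on the target
                 * CPU, so fall back to this_cpu and retry. The cpu !=
                 * this_cpu guard prevents looping forever on the local CPU.
                 */
                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
                        cpu = this_cpu;
                        spin_unlock(&new_base->cpu_base->lock);
                        spin_lock(&base->cpu_base->lock);
                        timer->base = base;
                        goto again;
                }
                timer->base = new_base;
        }
        return new_base;
}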
@@ -1287,14 +1282,22 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
         expires_next.tv64 = KTIME_MAX;
 
+        spin_lock(&cpu_base->lock);
+        /*
+         * We set expires_next to KTIME_MAX here with cpu_base->lock
+         * held to prevent that a timer is enqueued in our queue via
+         * the migration code. This does not affect enqueueing of
+         * timers which run their callback and need to be requeued on
+         * this CPU.
+         */
+        cpu_base->expires_next.tv64 = KTIME_MAX;
+
         base = cpu_base->clock_base;
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                 ktime_t basenow;
                 struct rb_node *node;
 
-                spin_lock(&cpu_base->lock);
-
                 basenow = ktime_add(now, base->offset);
 
                 while ((node = base->first)) {
@@ -1327,11 +1330,15 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
                         __run_hrtimer(timer);
                 }
-                spin_unlock(&cpu_base->lock);
                 base++;
         }
 
+        /*
+         * Store the new expiry value so the migration code can verify
+         * against it.
+         */
         cpu_base->expires_next = expires_next;
+        spin_unlock(&cpu_base->lock);
 
         /* Reprogramming necessary ? */
         if (expires_next.tv64 != KTIME_MAX) {
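
Note: taken together, the hrtimer_interrupt() hunks move cpu_base->lock out of the per-clock-base loop so it is held across the entire expiry run. Condensed into a sketch (the expiry-loop body is elided, and the loop is abbreviated relative to the real code), the new flow is:

/*
 * Condensed flow of hrtimer_interrupt() after this patch (sketch).
 * Holding cpu_base->lock across the whole run lets the migration
 * path in hrtimer_check_target() trust cpu_base->expires_next
 * instead of querying the clockevents layer.
 */
spin_lock(&cpu_base->lock);
cpu_base->expires_next.tv64 = KTIME_MAX;  /* keeps migration from enqueueing here */

for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
        /* ... expire due timers of this base, track earliest remainder ... */
}

cpu_base->expires_next = expires_next;    /* now valid for the migration check */
spin_unlock(&cpu_base->lock);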
11 changes: 0 additions & 11 deletions trunk/kernel/time/clockevents.c
@@ -254,15 +254,4 @@ void clockevents_notify(unsigned long reason, void *arg)
         spin_unlock(&clockevents_lock);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
-
-ktime_t clockevents_get_next_event(int cpu)
-{
-        struct tick_device *td;
-        struct clock_event_device *dev;
-
-        td = &per_cpu(tick_cpu_device, cpu);
-        dev = td->evtdev;
-
-        return dev->next_event;
-}
 #endif
