hrtimer: reduce calls to hrtimer_get_softirq_time()
It seems that hrtimer_run_queues() is calling hrtimer_get_softirq_time() more
often than it needs to.  This can cause frequent contention on systems with
large numbers of processors/cores.

With this patch, hrtimer_run_queues() calls hrtimer_get_softirq_time() only if
there is a pending timer in one of the hrtimer bases, and then at most once per call.

This also combines hrtimer_run_queues() and the inline run_hrtimer_queue()
into one function.

[ tglx@linutronix.de: coding style ]

Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
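
To illustrate the pattern the patch adopts, here is a minimal userspace sketch, not kernel code: the comparatively expensive time lookup is deferred until the first clock base with a queued timer is found, and it runs at most once per call. The names struct base, get_base_time() and run_queues() are made-up stand-ins for the kernel's clock bases, hrtimer_get_softirq_time() and hrtimer_run_queues().

/*
 * Illustrative userspace sketch only -- not kernel code.  'struct base',
 * get_base_time() and run_queues() are stand-ins for the kernel's clock
 * bases, hrtimer_get_softirq_time() and hrtimer_run_queues().
 */
#include <stdio.h>
#include <time.h>

#define NUM_BASES 2

struct base {
	int pending;			/* timers queued in this base */
};

/* stand-in for the comparatively expensive time lookup */
static struct timespec get_base_time(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts;
}

static void run_queues(struct base *bases, int nbases)
{
	struct timespec now = { 0, 0 };
	int index, gettime = 1;

	for (index = 0; index < nbases; index++) {
		if (!bases[index].pending)
			continue;		/* empty base: skip the lookup */

		if (gettime) {			/* first non-empty base only */
			now = get_base_time();
			gettime = 0;
		}

		/* ... expire timers in bases[index] against 'now' ... */
		printf("base %d: %d pending at %lld.%09ld\n", index,
		       bases[index].pending, (long long)now.tv_sec, now.tv_nsec);
	}
}

int main(void)
{
	struct base bases[NUM_BASES] = { { 0 }, { 3 } };

	/* get_base_time() runs at most once, and not at all if nothing is queued */
	run_queues(bases, NUM_BASES);
	return 0;
}

If every base is empty, the lookup is skipped entirely, which avoids the contention the commit message describes on machines with many processors/cores.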
Dimitri Sivanich authored and Thomas Gleixner committed Apr 21, 2008
1 parent 833df31 commit 833883d
1 changed file: kernel/hrtimer.c (32 additions, 32 deletions)
@@ -1238,51 +1238,51 @@ void hrtimer_run_pending(void)
 /*
  * Called from hardirq context every jiffy
  */
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
-				     int index)
+void hrtimer_run_queues(void)
 {
 	struct rb_node *node;
-	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_clock_base *base;
+	int index, gettime = 1;

-	if (!base->first)
+	if (hrtimer_hres_active())
 		return;

-	if (base->get_softirq_time)
-		base->softirq_time = base->get_softirq_time();
-
-	spin_lock(&cpu_base->lock);
+	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+		base = &cpu_base->clock_base[index];

-	while ((node = base->first)) {
-		struct hrtimer *timer;
+		if (!base->first)
+			continue;

-		timer = rb_entry(node, struct hrtimer, node);
-		if (base->softirq_time.tv64 <= timer->expires.tv64)
-			break;
+		if (gettime) {
+			hrtimer_get_softirq_time(cpu_base);
+			gettime = 0;
+		}

-		if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-			__remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
-			list_add_tail(&timer->cb_entry,
-					&base->cpu_base->cb_pending);
-			continue;
-		}
+		if (base->get_softirq_time)
+			base->softirq_time = base->get_softirq_time();

-		__run_hrtimer(timer);
-	}
-	spin_unlock(&cpu_base->lock);
-}
+		spin_lock(&cpu_base->lock);

-void hrtimer_run_queues(void)
-{
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-	int i;
+		while ((node = base->first)) {
+			struct hrtimer *timer;

-	if (hrtimer_hres_active())
-		return;
+			timer = rb_entry(node, struct hrtimer, node);
+			if (base->softirq_time.tv64 <= timer->expires.tv64)
+				break;

-	hrtimer_get_softirq_time(cpu_base);
+			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+				__remove_hrtimer(timer, base,
+					HRTIMER_STATE_PENDING, 0);
+				list_add_tail(&timer->cb_entry,
+					&base->cpu_base->cb_pending);
+				continue;
+			}

-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-		run_hrtimer_queue(cpu_base, i);
+			__run_hrtimer(timer);
+		}
+		spin_unlock(&cpu_base->lock);
+	}
 }

 /*
