timers: Split out "get next timer interrupt" functionality
The functionality for getting the next timer interrupt in
get_next_timer_interrupt() is split out into a separate function,
fetch_next_timer_interrupt(), so it can be used by other call sites.

This is preparatory work for converting the NOHZ timer placement to a
pull-at-expiry-time model. No functional change.

Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240221090548.36600-13-anna-maria@linutronix.de
Anna-Maria Behnsen authored and Thomas Gleixner committed Feb 22, 2024
1 parent 21927fc · commit 70b4cf8
Showing 1 changed file with 38 additions and 26 deletions.
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2033,30 +2033,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
 	return base->next_expiry;
 }
 
-static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
-					     bool *idle)
+static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
+						struct timer_base *base_local,
+						struct timer_base *base_global,
+						struct timer_events *tevt)
 {
-	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
 	unsigned long nextevt, nextevt_local, nextevt_global;
-	struct timer_base *base_local, *base_global;
 	bool local_first;
-	u64 expires;
-
-	/*
-	 * Pretend that there is no timer pending if the cpu is offline.
-	 * Possible pending timers will be migrated later to an active cpu.
-	 */
-	if (cpu_is_offline(smp_processor_id())) {
-		if (idle)
-			*idle = true;
-		return tevt.local;
-	}
-
-	base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
-	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
-
-	raw_spin_lock(&base_local->lock);
-	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
 
 	nextevt_local = next_timer_interrupt(base_local, basej);
 	nextevt_global = next_timer_interrupt(base_global, basej);
@@ -2074,8 +2057,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 		/* If we missed a tick already, force 0 delta */
 		if (time_before(nextevt, basej))
 			nextevt = basej;
-		tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC;
-		goto forward;
+		tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
+		return nextevt;
 	}
 
 	/*
@@ -2085,12 +2068,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	 * ignored. If the global queue is empty, nothing to do either.
 	 */
 	if (!local_first && base_global->timers_pending)
-		tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
+		tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
 
 	if (base_local->timers_pending)
-		tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
+		tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
 
+	return nextevt;
+}
+
+static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
+					     bool *idle)
+{
+	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
+	struct timer_base *base_local, *base_global;
+	unsigned long nextevt;
+	u64 expires;
+
+	/*
+	 * Pretend that there is no timer pending if the cpu is offline.
+	 * Possible pending timers will be migrated later to an active cpu.
+	 */
+	if (cpu_is_offline(smp_processor_id())) {
+		if (idle)
+			*idle = true;
+		return tevt.local;
+	}
+
+	base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
+	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
+
+	raw_spin_lock(&base_local->lock);
+	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
+
+	nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
+					     base_global, &tevt);
+
 
-forward:
 	/*
 	 * We have a fresh next event. Check whether we can forward the
 	 * base.
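The point of the split is that fetch_next_timer_interrupt() can now be called from other call sites that already hold both timer base locks. Below is a minimal sketch of such a caller, following the locking pattern __get_next_timer_interrupt() uses in the diff above. The function example_peek_next_event() is hypothetical and not part of this patch:

```c
/*
 * Hypothetical sketch only; not part of this patch. It mirrors the
 * pattern in __get_next_timer_interrupt() above: take both timer base
 * locks, let fetch_next_timer_interrupt() fill @tevt with the next
 * local and global expiry times (in ns), then drop the locks.
 */
static u64 example_peek_next_event(unsigned long basej, u64 basem)
{
	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
	struct timer_base *base_local, *base_global;

	base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);

	raw_spin_lock(&base_local->lock);
	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);

	/*
	 * The return value (next event in jiffies) is what
	 * __get_next_timer_interrupt() uses to forward the base clock;
	 * this sketch ignores it.
	 */
	fetch_next_timer_interrupt(basej, basem, base_local, base_global, &tevt);

	raw_spin_unlock(&base_global->lock);
	raw_spin_unlock(&base_local->lock);

	/* The earlier of the two deadlines is the next timer event. */
	return min(tevt.local, tevt.global);
}
```

Note the design consequence visible in the diff: because the helper returns nextevt instead of jumping to the forward: label, the goto and the label disappear, and base forwarding stays with the caller.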
