timers: Keep the pinned timers separate from the others
Separate the storage space for pinned timers. Deferrable timers (whether
pinned or not) are still enqueued into their own base.

This is preparatory work for changing the NOHZ timer placement from a
push-at-enqueue-time model to a pull-at-expiry-time model.

Originally-by: Richard Cochran (linutronix GmbH) <richardcochran@gmail.com>
Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240221090548.36600-11-anna-maria@linutronix.de
Anna-Maria Behnsen authored and Thomas Gleixner committed Feb 22, 2024
1 parent 9f6a3c6 commit 83a665d
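For context on what "pinned" means here: a timer is marked pinned by passing
TIMER_PINNED at init time, and a pinned timer must expire on the CPU it was
armed on. A minimal sketch, not part of this commit (timer name, callback and
expiry value are made up):

static struct timer_list my_timer;      /* hypothetical example timer */

static void my_timer_fn(struct timer_list *t)
{
        /* runs on the CPU the timer was armed on */
}

static void example_arm(void)
{
        /* TIMER_PINNED: expiry must not be migrated to another CPU */
        timer_setup(&my_timer, my_timer_fn, TIMER_PINNED);
        my_timer.expires = jiffies + HZ;
        add_timer(&my_timer);   /* after this patch: enqueued in BASE_LOCAL */
}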
Showing 1 changed file with 56 additions and 29 deletions.

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
@@ -187,12 +187,18 @@ EXPORT_SYMBOL(jiffies_64);
 #define WHEEL_SIZE      (LVL_SIZE * LVL_DEPTH)
 
 #ifdef CONFIG_NO_HZ_COMMON
-# define NR_BASES       2
-# define BASE_STD       0
-# define BASE_DEF       1
+/*
+ * If multiple bases need to be locked, use the base ordering for lock
+ * nesting, i.e. lowest number first.
+ */
+# define NR_BASES       3
+# define BASE_LOCAL     0
+# define BASE_GLOBAL    1
+# define BASE_DEF       2
 #else
 # define NR_BASES       1
-# define BASE_STD       0
+# define BASE_LOCAL     0
+# define BASE_GLOBAL    0
 # define BASE_DEF       0
 #endif
 
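The ordering rule above is put to use later in this patch in
__get_next_timer_interrupt(); condensed to the locking pattern alone, a
sketch (fragment; base_local and base_global as declared in that function):

        /* lock in ascending base order: BASE_LOCAL (0) before BASE_GLOBAL (1) */
        raw_spin_lock(&base_local->lock);
        raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);

        /* ... inspect and forward both bases ... */

        /* unlock in the reverse order */
        raw_spin_unlock(&base_global->lock);
        raw_spin_unlock(&base_local->lock);

raw_spin_lock_nested() with SINGLE_DEPTH_NESTING tells lockdep that taking a
second lock of the same class is intentional here.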
@@ -944,7 +950,10 @@ static int detach_if_pending(struct timer_list *timer, struct timer_base *base,

 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
 {
-        struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
+        int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
+        struct timer_base *base;
+
+        base = per_cpu_ptr(&timer_bases[index], cpu);
 
         /*
          * If the timer is deferrable and NO_HZ_COMMON is set then we need
Expand All @@ -957,7 +966,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)

 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
 {
-        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+        int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
+        struct timer_base *base;
+
+        base = this_cpu_ptr(&timer_bases[index]);
 
         /*
          * If the timer is deferrable and NO_HZ_COMMON is set then we need
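Both helpers now select the per-CPU base from the timer's flags. A
hypothetical summary helper (not in the patch; the name is made up) of where
a timer lands once CONFIG_NO_HZ_COMMON is enabled:

/* hypothetical illustration, not in the kernel */
static inline unsigned int timer_base_index(u32 tflags)
{
        /* deferrable timers keep their own base, pinned or not */
        if (tflags & TIMER_DEFERRABLE)
                return BASE_DEF;
        /* pinned timers are kept apart from globally placeable ones */
        return (tflags & TIMER_PINNED) ? BASE_LOCAL : BASE_GLOBAL;
}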
@@ -2006,6 +2018,9 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
          * Move next_expiry for the empty base into the future to prevent an
          * unnecessary raise of the timer softirq when the next_expiry value
          * will be reached even if there is no timer pending.
+         *
+         * This update is also required to make timer_base::next_expiry values
+         * easily comparable to find out which base holds the first pending timer.
          */
         if (!base->timers_pending)
                 base->next_expiry = basej + NEXT_TIMER_MAX_DELTA;
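A worked example with hypothetical values: an empty local base reports an
expiry NEXT_TIMER_MAX_DELTA ahead, so the comparison introduced below picks
the base that actually holds a pending timer:

/* hypothetical illustration, not in the patch */
static bool example_local_first(void)
{
        unsigned long basej       = jiffies;
        unsigned long local_next  = basej + NEXT_TIMER_MAX_DELTA; /* empty base */
        unsigned long global_next = basej + HZ;                   /* timer in 1s */

        /* false: the global base holds the first pending timer */
        return time_before_eq(local_next, global_next);
}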
@@ -2016,9 +2031,10 @@
 static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
                                              bool *idle)
 {
-        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+        unsigned long nextevt, nextevt_local, nextevt_global;
+        struct timer_base *base_local, *base_global;
         u64 expires = KTIME_MAX;
-        unsigned long nextevt;
+        bool local_first;
 
         /*
          * Pretend that there is no timer pending if the cpu is offline.
@@ -2030,10 +2046,20 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
                 return expires;
         }
 
-        raw_spin_lock(&base->lock);
-        nextevt = next_timer_interrupt(base, basej);
+        base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
+        base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
+
+        raw_spin_lock(&base_local->lock);
+        raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
+
+        nextevt_local = next_timer_interrupt(base_local, basej);
+        nextevt_global = next_timer_interrupt(base_global, basej);
+
+        local_first = time_before_eq(nextevt_local, nextevt_global);
+
+        nextevt = local_first ? nextevt_local : nextevt_global;
 
-        if (base->timers_pending) {
+        if (base_local->timers_pending || base_global->timers_pending) {
                 /* If we missed a tick already, force 0 delta */
                 if (time_before(nextevt, basej))
                         nextevt = basej;
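local_first above is computed with the wraparound-safe jiffies macro.
Stripped of its typechecks, time_before_eq() reduces to a signed subtraction
(see include/linux/jiffies.h), which stays correct across a jiffies wrap:

/* roughly what time_before_eq(a, b) expands to, typechecks omitted */
#define sketch_time_before_eq(a, b)     ((long)((a) - (b)) <= 0)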
@@ -2044,31 +2070,31 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
          * We have a fresh next event. Check whether we can forward the
          * base.
          */
-        __forward_timer_base(base, basej);
+        __forward_timer_base(base_local, basej);
+        __forward_timer_base(base_global, basej);
 
         /*
          * Set base->is_idle only when caller is timer_base_try_to_set_idle()
          */
         if (idle) {
                 /*
-                 * Base is idle if the next event is more than a tick away.
+                 * Bases are idle if the next event is more than a tick away.
                  *
                  * If the base is marked idle then any timer add operation must
                  * forward the base clk itself to keep granularity small. This
-                 * idle logic is only maintained for the BASE_STD base,
-                 * deferrable timers may still see large granularity skew (by
-                 * design).
+                 * idle logic is only maintained for the BASE_LOCAL and
+                 * BASE_GLOBAL base, deferrable timers may still see large
+                 * granularity skew (by design).
                  */
-                if (!base->is_idle) {
-                        if (time_after(nextevt, basej + 1)) {
-                                base->is_idle = true;
-                                trace_timer_base_idle(true, base->cpu);
-                        }
+                if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
+                        base_local->is_idle = base_global->is_idle = true;
+                        trace_timer_base_idle(true, base_local->cpu);
                 }
-                *idle = base->is_idle;
+                *idle = base_local->is_idle;
         }
 
-        raw_spin_unlock(&base->lock);
+        raw_spin_unlock(&base_global->lock);
+        raw_spin_unlock(&base_local->lock);
 
         return cmp_next_hrtimer_event(basem, expires);
 }
@@ -2112,15 +2138,14 @@ u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle)
  */
 void timer_clear_idle(void)
 {
-        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-
         /*
          * We do this unlocked. The worst outcome is a remote enqueue sending
          * a pointless IPI, but taking the lock would just make the window for
          * sending the IPI a few instructions smaller for the cost of taking
          * the lock in the exit from idle path.
          */
-        base->is_idle = false;
+        __this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
+        __this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
         trace_timer_base_idle(false, smp_processor_id());
 }
 #endif
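The flags are now cleared with __this_cpu_write(), the per-CPU store variant
that assumes preemption is already disabled, which holds on the idle-exit
path. Roughly equivalent, ignoring instrumentation (a sketch, fragment only):

        this_cpu_ptr(&timer_bases[BASE_LOCAL])->is_idle = false;
        this_cpu_ptr(&timer_bases[BASE_GLOBAL])->is_idle = false;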
@@ -2171,19 +2196,21 @@ static inline void __run_timers(struct timer_base *base)
  */
 static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
-        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
 
         __run_timers(base);
-        if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
+        if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
+                __run_timers(this_cpu_ptr(&timer_bases[BASE_GLOBAL]));
                 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+        }
 }
 
 /*
  * Called by the local, per-CPU timer interrupt on SMP.
  */
 static void run_local_timers(void)
 {
-        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
 
         hrtimer_run_queues();
 