From 3229adbe787534b43430f92e10175c9b77f2d27c Mon Sep 17 00:00:00 2001
From: K Prateek Nayak
Date: Mon, 23 Dec 2024 04:34:06 +0000
Subject: [PATCH] sched/fair: Do not compute overloaded status unnecessarily
 during lb

Only set sg_overloaded when computing sg_lb_stats() at the highest sched
domain since rd->overloaded status is updated only when load balancing
at the highest domain. While at it, move setting of sg_overloaded below
the idle_cpu() check since an idle CPU can never be overloaded.

Signed-off-by: K Prateek Nayak
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Vincent Guittot
Reviewed-by: Shrikanth Hegde
Link: https://lore.kernel.org/r/20241223043407.1611-8-kprateek.nayak@amd.com
---
 kernel/sched/fair.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 650d698244c47..98ac49ce78ea8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10410,6 +10410,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 				      bool *sg_overutilized)
 {
 	int i, nr_running, local_group, sd_flags = env->sd->flags;
+	bool balancing_at_rd = !env->sd->parent;
 
 	memset(sgs, 0, sizeof(*sgs));
 
@@ -10427,9 +10428,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		nr_running = rq->nr_running;
 		sgs->sum_nr_running += nr_running;
 
-		if (nr_running > 1)
-			*sg_overloaded = 1;
-
 		if (cpu_overutilized(i))
 			*sg_overutilized = 1;
 
@@ -10442,6 +10440,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			continue;
 		}
 
+		/* Overload indicator is only updated at root domain */
+		if (balancing_at_rd && nr_running > 1)
+			*sg_overloaded = 1;
+
 #ifdef CONFIG_NUMA_BALANCING
 		/* Only fbq_classify_group() uses this to classify NUMA groups */
 		if (sd_flags & SD_NUMA) {
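
Note (editor, for context; not part of the patch): the reason the overload
check can be skipped below the root domain is that the collected
sg_overloaded value is only ever published to rd->overloaded when
update_sd_lb_stats() runs with no parent domain. The sketch below is a
paraphrase of the consumer side in kernel/sched/fair.c around this series;
the set_rd_overloaded()/set_rd_overutilized() helper names come from recent
trees and exact details may differ in other versions.

	/*
	 * End of update_sd_lb_stats(), paraphrased: the group-level
	 * indicators gathered by update_sg_lb_stats() only reach the
	 * root domain when balancing at the highest domain, which is
	 * why update_sg_lb_stats() need not compute sg_overloaded for
	 * lower domains.
	 */
	if (!env->sd->parent) {
		/* Update overload indicator only at the root domain */
		set_rd_overloaded(env->dst_rq->rd, sg_overloaded);

		/* Update over-utilization (tipping point) indicator */
		set_rd_overutilized(env->dst_rq->rd, sg_overutilized);
	} else if (sg_overutilized) {
		set_rd_overutilized(env->dst_rq->rd, sg_overutilized);
	}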