sched/debug: Make CONFIG_SCHED_DEBUG functionality unconditional
All the big Linux distros enable CONFIG_SCHED_DEBUG, because
the various features it provides help not just with kernel
development, but with system administration and user-space
software development as well.

Reflect this reality and enable this functionality
unconditionally.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250317104257.3496611-4-mingo@kernel.org
Ingo Molnar committed Mar 19, 2025
1 parent 57903f7 commit dd5bdaf
Showing 12 changed files with 9 additions and 108 deletions.
7 changes: 0 additions & 7 deletions fs/proc/base.c
@@ -1489,7 +1489,6 @@ static const struct file_operations proc_fail_nth_operations = {
#endif


#ifdef CONFIG_SCHED_DEBUG
/*
* Print out various scheduling related per-task fields:
*/
@@ -1539,8 +1538,6 @@ static const struct file_operations proc_pid_sched_operations = {
.release = single_release,
};

#endif

#ifdef CONFIG_SCHED_AUTOGROUP
/*
* Print out autogroup related information:
@@ -3331,9 +3328,7 @@ static const struct pid_entry tgid_base_stuff[] = {
ONE("status", S_IRUGO, proc_pid_status),
ONE("personality", S_IRUSR, proc_pid_personality),
ONE("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
#endif
@@ -3682,9 +3677,7 @@ static const struct pid_entry tid_base_stuff[] = {
ONE("status", S_IRUGO, proc_pid_status),
ONE("personality", S_IRUSR, proc_pid_personality),
ONE("limits", S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
NOD("comm", S_IFREG|S_IRUGO|S_IWUSR,
&proc_tid_comm_inode_operations,
&proc_pid_set_comm_operations, {}),
2 changes: 0 additions & 2 deletions include/linux/energy_model.h
@@ -240,9 +240,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
struct em_perf_state *ps;
int i;

#ifdef CONFIG_SCHED_DEBUG
WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
#endif

if (!sum_util)
return 0;
2 changes: 0 additions & 2 deletions include/linux/sched/debug.h
@@ -35,12 +35,10 @@ extern void show_stack(struct task_struct *task, unsigned long *sp,

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_SCHED_DEBUG
struct seq_file;
extern void proc_sched_show_task(struct task_struct *p,
struct pid_namespace *ns, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched __section(".sched.text")
4 changes: 0 additions & 4 deletions include/linux/sched/topology.h
@@ -25,16 +25,12 @@ enum {
};
#undef SD_FLAG

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
unsigned int meta_flags;
char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
2 changes: 0 additions & 2 deletions include/trace/events/sched.h
@@ -193,9 +193,7 @@ static inline long __trace_sched_switch_state(bool preempt,
{
unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

/*
* Preemption ignores task state, therefore preempted tasks are always
4 changes: 1 addition & 3 deletions kernel/sched/build_utility.c
@@ -68,9 +68,7 @@
# include "cpufreq_schedutil.c"
#endif

#ifdef CONFIG_SCHED_DEBUG
# include "debug.c"
#endif
#include "debug.c"

#ifdef CONFIG_SCHEDSTATS
# include "stats.c"
18 changes: 3 additions & 15 deletions kernel/sched/core.c
@@ -118,7 +118,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
* Debugging: various feature bits
*
@@ -142,7 +141,6 @@ __read_mostly unsigned int sysctl_sched_features =
*/
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
* Number of tasks to iterate in a single balance run.
@@ -799,11 +797,10 @@ void update_rq_clock(struct rq *rq)
if (rq->clock_update_flags & RQCF_ACT_SKIP)
return;

#ifdef CONFIG_SCHED_DEBUG
if (sched_feat(WARN_DOUBLE_CLOCK))
WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
rq->clock_update_flags |= RQCF_UPDATED;
#endif

clock = sched_clock_cpu(cpu_of(rq));
scx_rq_clock_update(rq, clock);

@@ -3291,7 +3288,6 @@ void relax_compatible_cpus_allowed_ptr(struct task_struct *p)

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
unsigned int state = READ_ONCE(p->__state);

/*
@@ -3329,7 +3325,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
WARN_ON_ONCE(!cpu_online(new_cpu));

WARN_ON_ONCE(is_migration_disabled(p));
#endif

trace_sched_migrate_task(p, new_cpu);

@@ -5577,7 +5572,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
return ns;
}

#ifdef CONFIG_SCHED_DEBUG
static u64 cpu_resched_latency(struct rq *rq)
{
int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
@@ -5622,9 +5616,6 @@ static int __init setup_resched_latency_warn_ms(char *str)
return 1;
}
__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
#else
static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
#endif /* CONFIG_SCHED_DEBUG */

/*
* This function gets called by the timer code, with HZ frequency.
@@ -6718,9 +6709,7 @@ static void __sched notrace __schedule(int sched_mode)
picked:
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
#endif

if (likely(prev != next)) {
rq->nr_switches++;
@@ -7094,7 +7083,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
void *key)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
@@ -7811,10 +7800,9 @@ void show_state_filter(unsigned int state_filter)
sched_show_task(p);
}

#ifdef CONFIG_SCHED_DEBUG
if (!state_filter)
sysrq_sched_debug_show();
#endif

rcu_read_unlock();
/*
* Only show locks if all tasks are dumped:
2 changes: 0 additions & 2 deletions kernel/sched/deadline.c
@@ -3574,9 +3574,7 @@ void dl_bw_free(int cpu, u64 dl_bw)
}
#endif

#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */
4 changes: 0 additions & 4 deletions kernel/sched/fair.c
@@ -983,7 +983,6 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
return best;
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
@@ -1010,7 +1009,6 @@ int sched_update_scaling(void)
return 0;
}
#endif
#endif

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);

@@ -13668,7 +13666,6 @@ DEFINE_SCHED_CLASS(fair) = {
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq, *pos;
@@ -13702,7 +13699,6 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

__init void init_sched_fair_class(void)
{
5 changes: 1 addition & 4 deletions kernel/sched/rt.c
@@ -169,9 +169,8 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif

return container_of(rt_se, struct task_struct, rt);
}

@@ -2969,7 +2968,6 @@ static int sched_rr_handler(const struct ctl_table *table, int write, void *buff
}
#endif /* CONFIG_SYSCTL */

#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
rt_rq_iter_t iter;
@@ -2980,4 +2978,3 @@ void print_rt_stats(struct seq_file *m, int cpu)
print_rt_rq(m, cpu, rt_rq);
rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */