sched/debug: Make 'const_debug' tunables unconditional __read_mostly
With CONFIG_SCHED_DEBUG becoming unconditional, remove the
extra 'const_debug' indirection towards __read_mostly.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250317104257.3496611-3-mingo@kernel.org
Ingo Molnar committed Mar 19, 2025 · 1 parent f7d2728 · commit 57903f7
Showing 3 changed files with 8 additions and 13 deletions.
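For context, here is a minimal before/after sketch of the indirection this patch removes, using sysctl_sched_nr_migrate as the example (declarations taken from the hunks below). In the old scheme the qualifier depended on CONFIG_SCHED_DEBUG, so non-debug builds compiled these tunables as true constants:

    /* Before: kernel/sched/sched.h defined a config-dependent qualifier */
    #ifdef CONFIG_SCHED_DEBUG
    # define const_debug __read_mostly   /* debug builds: runtime-tunable, kept in read-mostly data */
    #else
    # define const_debug const           /* non-debug builds: a true compile-time constant */
    #endif

    const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

    /* After: with CONFIG_SCHED_DEBUG unconditional, the macro is dropped and
     * the __read_mostly section placement is spelled out directly: */
    __read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;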
4 changes: 2 additions & 2 deletions kernel/sched/core.c
@@ -128,7 +128,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
  */
 #define SCHED_FEAT(name, enabled) \
 	(1UL << __SCHED_FEAT_##name) * enabled |
-const_debug unsigned int sysctl_sched_features =
+__read_mostly unsigned int sysctl_sched_features =
 #include "features.h"
 	0;
 #undef SCHED_FEAT
@@ -148,7 +148,7 @@ __read_mostly int sysctl_resched_latency_warn_once = 1;
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
-const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
+__read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

 __read_mostly int scheduler_running;

2 changes: 1 addition & 1 deletion kernel/sched/fair.c
@@ -79,7 +79,7 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
 unsigned int sysctl_sched_base_slice = 700000ULL;
 static unsigned int normalized_sysctl_sched_base_slice = 700000ULL;

-const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+__read_mostly unsigned int sysctl_sched_migration_cost = 500000UL;

 static int __init setup_sched_thermal_decay_shift(char *str)
 {

15 changes: 5 additions & 10 deletions kernel/sched/sched.h
@@ -2194,13 +2194,8 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 }

 /*
- * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ * Tunables:
  */
-#ifdef CONFIG_SCHED_DEBUG
-# define const_debug __read_mostly
-#else
-# define const_debug const
-#endif

 #define SCHED_FEAT(name, enabled) \
 	__SCHED_FEAT_##name ,
@@ -2218,7 +2213,7 @@ enum {
  * To support run-time toggling of sched features, all the translation units
  * (but core.c) reference the sysctl_sched_features defined in core.c.
  */
-extern const_debug unsigned int sysctl_sched_features;
+extern __read_mostly unsigned int sysctl_sched_features;

 #ifdef CONFIG_JUMP_LABEL

@@ -2249,7 +2244,7 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
  */
 #define SCHED_FEAT(name, enabled) \
 	(1UL << __SCHED_FEAT_##name) * enabled |
-static const_debug __maybe_unused unsigned int sysctl_sched_features =
+static __read_mostly __maybe_unused unsigned int sysctl_sched_features =
 #include "features.h"
 	0;
 #undef SCHED_FEAT
@@ -2837,8 +2832,8 @@ extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
 # define SCHED_NR_MIGRATE_BREAK 32
 #endif

-extern const_debug unsigned int sysctl_sched_nr_migrate;
-extern const_debug unsigned int sysctl_sched_migration_cost;
+extern __read_mostly unsigned int sysctl_sched_nr_migrate;
+extern __read_mostly unsigned int sysctl_sched_migration_cost;

 extern unsigned int sysctl_sched_base_slice;