rcu/nocb: Optimize kthreads and rdp initialization

Currently cpumask_available() is used to prevent unwanted NOCB
initialization.  However, if neither the "rcu_nocbs=" nor the "nohz_full="
parameter is passed to a kernel built with CONFIG_CPUMASK_OFFSTACK=n,
the initialization path is still taken, running through all sorts of
needless operations and iterations on an empty cpumask.
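
[Editorial note: the reason cpumask_available() cannot gate this path is that
with CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is an embedded array rather than
a separately allocated pointer, so the check is compile-time true and cannot
distinguish "never set up" from "set up but empty".  A simplified paraphrase of
the include/linux/cpumask.h definitions (exact code and comments vary by kernel
version):

#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* allocated separately */
static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;			/* false until allocated */
}
#else
typedef struct cpumask cpumask_var_t[1];	/* embedded storage */
static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;				/* always "available", even when empty */
}
#endif
]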

Fix this by relying on a real initialization state instead.  This also
optimizes kthread creation, preventing needless iteration over all online
CPUs when the kernel is booted without any offloaded CPUs.

Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Frederic Weisbecker authored and Paul E. McKenney committed Dec 9, 2021
1 parent 8d97039 commit a81aeaf
Showing 1 changed file with 17 additions and 7 deletions.
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -60,13 +60,17 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
  * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
  * If the list is invalid, a warning is emitted and all CPUs are offloaded.
  */
+
+static bool rcu_nocb_is_setup;
+
 static int __init rcu_nocb_setup(char *str)
 {
 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
 	if (cpulist_parse(str, rcu_nocb_mask)) {
 		pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
 		cpumask_setall(rcu_nocb_mask);
 	}
+	rcu_nocb_is_setup = true;
 	return 1;
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
@@ -1167,13 +1171,17 @@ void __init rcu_init_nohz(void)
 		need_rcu_nocb_mask = true;
 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
 
-	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
-		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
-			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
-			return;
+	if (need_rcu_nocb_mask) {
+		if (!cpumask_available(rcu_nocb_mask)) {
+			if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+				pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+				return;
+			}
 		}
+		rcu_nocb_is_setup = true;
 	}
-	if (!cpumask_available(rcu_nocb_mask))
+
+	if (!rcu_nocb_is_setup)
 		return;
 
 #if defined(CONFIG_NO_HZ_FULL)
@@ -1275,8 +1283,10 @@ static void __init rcu_spawn_nocb_kthreads(void)
 {
 	int cpu;
 
-	for_each_online_cpu(cpu)
-		rcu_spawn_cpu_nocb_kthread(cpu);
+	if (rcu_nocb_is_setup) {
+		for_each_online_cpu(cpu)
+			rcu_spawn_cpu_nocb_kthread(cpu);
+	}
 }
 
 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
