rcu/context_tracking: Move dynticks counter to context tracking
In order to prepare for merging the RCU dynticks counter into the context
tracking state, move rcu_data's ->dynticks field to the context tracking
structure. It will later be folded into the context tracking state itself.

[ paulmck: Move enum ctx_state into global scope. ]

Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Frederic Weisbecker authored and Paul E. McKenney committed Jul 5, 2022
1 parent 3864caa commit 62e2412
Showing 6 changed files with 75 additions and 43 deletions.
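
In short: the per-CPU ->dynticks counter leaves struct rcu_data and takes up residence in struct context_tracking. A minimal before/after sketch, condensed from the hunks below (not a compilable unit):

struct rcu_data {                       /* kernel/rcu/tree.h, before */
        ...
        atomic_t dynticks;              /* Even value for idle, else odd. */
        ...
};

struct context_tracking {               /* context_tracking_state.h, after */
        ...
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
        atomic_t dynticks;              /* Even value for idle, else odd. */
#endif
};
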
45 changes: 38 additions & 7 deletions include/linux/context_tracking_state.h
@@ -6,7 +6,15 @@
#include <linux/static_key.h>
#include <linux/context_tracking_irq.h>

enum ctx_state {
CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,
};

struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
@@ -15,17 +23,40 @@ struct context_tracking {
*/
bool active;
int recursion;
enum ctx_state {
CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0,
CONTEXT_USER,
CONTEXT_GUEST,
} state;
enum ctx_state state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
atomic_t dynticks; /* Even value for idle, else odd. */
#endif
};

#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);
#endif

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
static __always_inline int ct_dynticks(void)
{
return atomic_read(this_cpu_ptr(&context_tracking.dynticks));
}

static __always_inline int ct_dynticks_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

return atomic_read(&ct->dynticks);
}

static __always_inline int ct_dynticks_cpu_acquire(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

return atomic_read_acquire(&ct->dynticks);
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_CONTEXT_TRACKING_USER
extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);

static __always_inline bool context_tracking_enabled(void)
{
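The three ct_dynticks*() accessors above give RCU read access to the counter without reaching into context-tracking internals: a plain read for the local CPU, a plain remote read, and an acquire-ordered remote read. A hedged usage sketch, assuming CONFIG_CONTEXT_TRACKING_IDLE=y; the helper name is hypothetical, not part of this commit:

/* Hypothetical helper, for illustration only: the counter is even
 * while the CPU is idle (in an extended quiescent state), else odd,
 * so an idleness test reduces to a parity check.
 */
static bool cpu_looks_idle_to_rcu(int cpu)
{
        return !(ct_dynticks_cpu(cpu) & 0x1);
}
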
10 changes: 7 additions & 3 deletions kernel/context_tracking.c
@@ -23,6 +23,13 @@
#include <linux/kprobes.h>


DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
.dynticks = ATOMIC_INIT(1),
#endif
};
EXPORT_SYMBOL_GPL(context_tracking);

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
noinstr void ct_idle_enter(void)
{
@@ -138,9 +145,6 @@ noinstr void ct_nmi_exit(void)
DEFINE_STATIC_KEY_FALSE(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

static noinstr bool context_tracking_recursion_enter(void)
{
int recursion;
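The initializer moves along with the field: .dynticks starts at ATOMIC_INIT(1), an odd value, so every CPU looks non-idle to RCU at boot, per the even-for-idle convention. A sketch of the invariant this preserves, mirroring the parity check that rcu_dynticks_eqs_online() performs in tree.c:

/* Sketch only: at boot, a CPU must not appear to be in an
 * extended quiescent state (odd counter == not idle).
 */
WARN_ON_ONCE(!(ct_dynticks() & 0x1));
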
56 changes: 27 additions & 29 deletions kernel/rcu/tree.c
@@ -77,7 +77,6 @@
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
.dynticks_nesting = 1,
.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_RCU_NOCB_CPU
.cblist.flags = SEGCBLIST_RCU_CORE,
#endif
@@ -268,7 +267,7 @@ void rcu_softirq_qs(void)
*/
static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
{
return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.dynticks));
}

/*
@@ -324,9 +323,7 @@ static noinstr void rcu_dynticks_eqs_exit(void)
*/
static void rcu_dynticks_eqs_online(void)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

if (atomic_read(&rdp->dynticks) & 0x1)
if (ct_dynticks() & 0x1)
return;
rcu_dynticks_inc(1);
}
@@ -338,17 +335,17 @@ static void rcu_dynticks_eqs_online(void)
*/
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
return !(arch_atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
return !(arch_atomic_read(this_cpu_ptr(&context_tracking.dynticks)) & 0x1);
}

/*
* Snapshot the ->dynticks counter with full ordering so as to allow
* stable comparison of this counter with past and future snapshots.
*/
static int rcu_dynticks_snap(struct rcu_data *rdp)
static int rcu_dynticks_snap(int cpu)
{
smp_mb(); // Fundamental RCU ordering guarantee.
return atomic_read_acquire(&rdp->dynticks);
return ct_dynticks_cpu_acquire(cpu);
}

/*
Expand All @@ -363,9 +360,7 @@ static bool rcu_dynticks_in_eqs(int snap)
/* Return true if the specified CPU is currently idle from an RCU viewpoint. */
bool rcu_is_idle_cpu(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
return rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
}

/*
@@ -375,7 +370,7 @@ bool rcu_is_idle_cpu(int cpu)
*/
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
return snap != rcu_dynticks_snap(rdp);
return snap != rcu_dynticks_snap(rdp->cpu);
}

/*
Expand All @@ -384,19 +379,18 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
*/
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
int snap;

// If not quiescent, force back to earlier extended quiescent state.
snap = atomic_read(&rdp->dynticks) & ~0x1;
snap = ct_dynticks_cpu(cpu) & ~0x1;

smp_rmb(); // Order ->dynticks and *vp reads.
if (READ_ONCE(*vp))
return false; // Non-zero, so report failure;
smp_rmb(); // Order *vp read and ->dynticks re-read.

// If still in the same extended quiescent state, we are good!
return snap == atomic_read(&rdp->dynticks);
return snap == ct_dynticks_cpu(cpu);
}

/*
Expand Down Expand Up @@ -620,6 +614,7 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
static noinstr void rcu_eqs_enter(bool user)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct context_tracking *ct = this_cpu_ptr(&context_tracking);

WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
@@ -633,12 +628,12 @@ static noinstr void rcu_eqs_enter(bool user)

instrumentation_begin();
lockdep_assert_irqs_disabled();
trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, ct_dynticks());
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
rcu_preempt_deferred_qs(current);

// instrumentation for the noinstr rcu_dynticks_eqs_enter()
instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));

instrumentation_end();
WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
Expand Down Expand Up @@ -740,7 +735,7 @@ noinstr void rcu_user_enter(void)
* rcu_nmi_exit - inform RCU of exit from NMI context
*
* If we are returning from the outermost NMI handler that interrupted an
* RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
* RCU-idle period, update ct->dynticks and rdp->dynticks_nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
*
@@ -749,6 +744,7 @@ noinstr void rcu_user_enter(void)
*/
noinstr void rcu_nmi_exit(void)
{
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

instrumentation_begin();
Expand All @@ -766,19 +762,19 @@ noinstr void rcu_nmi_exit(void)
*/
if (rdp->dynticks_nmi_nesting != 1) {
trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
atomic_read(&rdp->dynticks));
ct_dynticks());
WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
rdp->dynticks_nmi_nesting - 2);
instrumentation_end();
return;
}

/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, ct_dynticks());
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

// instrumentation for the noinstr rcu_dynticks_eqs_enter()
instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));
instrumentation_end();

// RCU is watching here ...
Expand Down Expand Up @@ -817,6 +813,7 @@ void rcu_irq_exit_check_preempt(void)
*/
static void noinstr rcu_eqs_exit(bool user)
{
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
struct rcu_data *rdp;
long oldval;

Expand All @@ -836,9 +833,9 @@ static void noinstr rcu_eqs_exit(bool user)
instrumentation_begin();

// instrumentation for the noinstr rcu_dynticks_eqs_exit()
instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));

trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, ct_dynticks());
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
WRITE_ONCE(rdp->dynticks_nesting, 1);
WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
Expand Down Expand Up @@ -944,7 +941,7 @@ void __rcu_irq_enter_check_tick(void)
/**
* rcu_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
* If the CPU was idle from RCU's viewpoint, update ct->dynticks and
* rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
* that the CPU is active. This implementation permits nested NMIs, as
* long as the nesting level does not overflow an int. (You will probably
Expand All @@ -957,6 +954,7 @@ noinstr void rcu_nmi_enter(void)
{
long incby = 2;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct context_tracking *ct = this_cpu_ptr(&context_tracking);

/* Complain about underflow. */
WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
@@ -980,9 +978,9 @@

instrumentation_begin();
// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
instrument_atomic_read(&ct->dynticks, sizeof(ct->dynticks));
// instrumentation for the noinstr rcu_dynticks_eqs_exit()
instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
instrument_atomic_write(&ct->dynticks, sizeof(ct->dynticks));

incby = 1;
} else if (!in_nmi()) {
@@ -994,7 +992,7 @@

trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
rdp->dynticks_nmi_nesting,
rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
rdp->dynticks_nmi_nesting + incby, ct_dynticks());
instrumentation_end();
WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
rdp->dynticks_nmi_nesting + incby);
@@ -1138,7 +1136,7 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
rdp->dynticks_snap = rcu_dynticks_snap(rdp);
rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
@@ -4142,7 +4140,7 @@ rcu_boot_init_percpu_data(int cpu)
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
INIT_WORK(&rdp->strict_work, strict_work_handler);
WARN_ON_ONCE(rdp->dynticks_nesting != 1);
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
rdp->barrier_seq_snap = rcu_state.barrier_sequence;
rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
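The tree.c changes are otherwise mechanical: every atomic_read(&rdp->dynticks) becomes a ct_dynticks*() call, and rcu_dynticks_snap() now takes a CPU number instead of a struct rcu_data pointer. The snapshot-and-compare idiom they implement is unchanged: snapshot a CPU's counter, then later report a quiescent state if the snapshot was even or the counter has since moved. A condensed, illustrative-only sketch combining rcu_dynticks_in_eqs() and rcu_dynticks_in_eqs_since(); the function name is invented:

static bool cpu_passed_through_eqs(int cpu, int snap)
{
        if (!(snap & 0x1))      /* snapshot taken while CPU was idle */
                return true;
        /* counter advanced, so the CPU went idle at some point since */
        return snap != ct_dynticks_cpu_acquire(cpu);
}
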
1 change: 0 additions & 1 deletion kernel/rcu/tree.h
@@ -189,7 +189,6 @@ struct rcu_data {
int dynticks_snap; /* Per-GP tracking for dynticks. */
long dynticks_nesting; /* Track process nesting level. */
long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
atomic_t dynticks; /* Even value for idle, else odd. */
bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
bool rcu_urgent_qs; /* GP old need light quiescent state. */
bool rcu_forced_tick; /* Forced tick to provide QS. */
2 changes: 1 addition & 1 deletion kernel/rcu/tree_exp.h
@@ -356,7 +356,7 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
!(rnp->qsmaskinitnext & mask)) {
mask_ofl_test |= mask;
} else {
snap = rcu_dynticks_snap(rdp);
snap = rcu_dynticks_snap(cpu);
if (rcu_dynticks_in_eqs(snap))
mask_ofl_test |= mask;
else
4 changes: 2 additions & 2 deletions kernel/rcu/tree_stall.h
@@ -465,7 +465,7 @@ static void print_cpu_stall_info(int cpu)
}
delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
falsepositive = rcu_is_gp_kthread_starving(NULL) &&
rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
if (rcuc_starved)
sprintf(buf, " rcuc=%ld jiffies(starved)", j);
@@ -478,7 +478,7 @@ static void print_cpu_stall_info(int cpu)
rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
"!."[!delta],
ticks_value, ticks_title,
rcu_dynticks_snap(rdp) & 0xfff,
rcu_dynticks_snap(cpu) & 0xfff,
rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
