rcu/context-tracking: Remove unused and/or unnecessary middle functions
Some eqs functions are now only used internally by context tracking, so
their public declarations can be removed.

Also, middle functions such as rcu_user_*() and rcu_idle_*(), whose only job
is to call rcu_eqs_enter() and rcu_eqs_exit(), can be wiped out as well: their
ct_*() callers now invoke rcu_eqs_enter() and rcu_eqs_exit() directly.
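
For example, after this change the idle entry point in kernel/context_tracking.c
calls the eqs helper directly, with no rcu_idle_enter() layer in between. This is
the resulting function as it appears in the context_tracking.c hunk below; the
old and new call chains are summarized in the leading comments:

  /* Before: ct_idle_enter() -> rcu_idle_enter() -> rcu_eqs_enter(false) */
  /* After:  ct_idle_enter() -> rcu_eqs_enter(false)                     */
  void noinstr ct_idle_enter(void)
  {
          WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
          rcu_eqs_enter(false);
  }
  EXPORT_SYMBOL_GPL(ct_idle_enter);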

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Frederic Weisbecker authored and Paul E. McKenney committed Jul 5, 2022
1 parent 1721145 commit c33ef43
Showing 6 changed files with 28 additions and 92 deletions.
2 changes: 1 addition & 1 deletion Documentation/RCU/stallwarn.rst
@@ -97,7 +97,7 @@ warnings:
which will include additional debugging information.

- A low-level kernel issue that either fails to invoke one of the
variants of rcu_user_enter(), rcu_user_exit(), ct_idle_enter(),
variants of rcu_eqs_enter(true), rcu_eqs_exit(true), ct_idle_enter(),
ct_idle_exit(), ct_irq_enter(), or ct_irq_exit() on the one
hand, or that invokes one of them too many times on the other.
Historically, the most frequent issue has been an omission
8 changes: 0 additions & 8 deletions include/linux/hardirq.h
@@ -92,14 +92,6 @@ void irq_exit_rcu(void);
#define arch_nmi_exit() do { } while (0)
#endif

#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
* NMI vs Tracing
* --------------
8 changes: 0 additions & 8 deletions include/linux/rcupdate.h
@@ -104,14 +104,6 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
2 changes: 0 additions & 2 deletions include/linux/rcutiny.h
@@ -76,8 +76,6 @@ static inline int rcu_needs_cpu(void)
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \
(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
2 changes: 0 additions & 2 deletions include/linux/rcutree.h
@@ -45,8 +45,6 @@ unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu(unsigned long oldstate);

void rcu_idle_enter(void);
void rcu_idle_exit(void);
bool rcu_is_idle_cpu(int cpu);

#ifdef CONFIG_PROVE_RCU
98 changes: 27 additions & 71 deletions kernel/context_tracking.c
@@ -189,17 +189,17 @@ static void noinstr rcu_eqs_exit(bool user)
}

/**
* rcu_nmi_exit - inform RCU of exit from NMI context
* ct_nmi_exit - inform RCU of exit from NMI context
*
* If we are returning from the outermost NMI handler that interrupted an
* RCU-idle period, update ct->dynticks and ct->dynticks_nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
*
* If you add or remove a call to rcu_nmi_exit(), be sure to test
* If you add or remove a call to ct_nmi_exit(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
void noinstr rcu_nmi_exit(void)
void noinstr ct_nmi_exit(void)
{
struct context_tracking *ct = this_cpu_ptr(&context_tracking);

@@ -242,18 +242,18 @@ void noinstr rcu_nmi_exit(void)
}

/**
* rcu_nmi_enter - inform RCU of entry to NMI context
* ct_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle from RCU's viewpoint, update ct->dynticks and
* ct->dynticks_nmi_nesting to let the RCU grace-period handling know
* that the CPU is active. This implementation permits nested NMIs, as
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
*
* If you add or remove a call to rcu_nmi_enter(), be sure to test
* If you add or remove a call to ct_nmi_enter(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
void noinstr rcu_nmi_enter(void)
void noinstr ct_nmi_enter(void)
{
long incby = 2;
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
@@ -302,51 +302,40 @@ void noinstr rcu_nmi_enter(void)
}

/**
* rcu_idle_enter - inform RCU that current CPU is entering idle
* ct_idle_enter - inform RCU that current CPU is entering idle
*
* Enter idle mode, in other words, -leave- the mode in which RCU
* read-side critical sections can occur. (Though RCU read-side
* critical sections can occur in irq handlers in idle, a possibility
* handled by irq_enter() and irq_exit().)
*
* If you add or remove a call to rcu_idle_enter(), be sure to test with
* If you add or remove a call to ct_idle_enter(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
void noinstr rcu_idle_enter(void)
void noinstr ct_idle_enter(void)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
rcu_eqs_enter(false);
}
EXPORT_SYMBOL_GPL(ct_idle_enter);

/**
* rcu_idle_exit - inform RCU that current CPU is leaving idle
* ct_idle_exit - inform RCU that current CPU is leaving idle
*
* Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur.
*
* If you add or remove a call to rcu_idle_exit(), be sure to test with
* If you add or remove a call to ct_idle_exit(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
void noinstr rcu_idle_exit(void)
void noinstr ct_idle_exit(void)
{
unsigned long flags;

raw_local_irq_save(flags);
rcu_eqs_exit(false);
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

noinstr void ct_idle_enter(void)
{
rcu_idle_enter();
}
EXPORT_SYMBOL_GPL(ct_idle_enter);

void ct_idle_exit(void)
{
rcu_idle_exit();
}
EXPORT_SYMBOL_GPL(ct_idle_exit);

/**
@@ -431,50 +420,11 @@ void ct_irq_exit_irqson(void)
ct_irq_exit();
local_irq_restore(flags);
}

noinstr void ct_nmi_enter(void)
{
rcu_nmi_enter();
}

noinstr void ct_nmi_exit(void)
{
rcu_nmi_exit();
}
#else
static __always_inline void rcu_eqs_enter(bool user) { }
static __always_inline void rcu_eqs_exit(bool user) { }
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_NO_HZ_FULL
/**
* rcu_user_enter - inform RCU that we are resuming userspace.
*
* Enter RCU idle mode right before resuming userspace. No use of RCU
* is permitted between this call and rcu_user_exit(). This way the
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
*
* If you add or remove a call to rcu_user_enter(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
noinstr void rcu_user_enter(void)
{
rcu_eqs_enter(true);
}

/**
* rcu_user_exit - inform RCU that we are exiting userspace.
*
* Exit RCU idle mode while entering the kernel because it can
* run a RCU read side critical section anytime.
*
* If you add or remove a call to rcu_user_exit(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
void noinstr rcu_user_exit(void)
{
rcu_eqs_exit(true);
}
#endif /* #ifdef CONFIG_NO_HZ_FULL */

#ifdef CONFIG_CONTEXT_TRACKING_USER

#define CREATE_TRACE_POINTS
@@ -542,7 +492,13 @@ void noinstr __ct_user_enter(enum ctx_state state)
* that will fire and reschedule once we resume in user/guest mode.
*/
rcu_irq_work_resched();
rcu_user_enter();
/*
* Enter RCU idle mode right before resuming userspace. No use of RCU
* is permitted between this call and rcu_eqs_exit(). This way the
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
*/
rcu_eqs_enter(true);
}
/*
* Even if context tracking is disabled on this CPU, because it's outside
@@ -579,7 +535,7 @@ void ct_user_enter(enum ctx_state state)
/*
* Some contexts may involve an exception occuring in an irq,
* leading to that nesting:
* ct_irq_enter() rcu_user_exit() rcu_user_exit() ct_irq_exit()
* ct_irq_enter() rcu_eqs_exit(true) rcu_eqs_enter(true) ct_irq_exit()
* This would mess up the dyntick_nesting count though. And rcu_irq_*()
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
@@ -631,10 +587,10 @@ void noinstr __ct_user_exit(enum ctx_state state)
if (__this_cpu_read(context_tracking.state) == state) {
if (__this_cpu_read(context_tracking.active)) {
/*
* We are going to run code that may use RCU. Inform
* RCU core about that (ie: we may need the tick again).
* Exit RCU idle mode while entering the kernel because it can
* run a RCU read side critical section anytime.
*/
rcu_user_exit();
rcu_eqs_exit(true);
if (state == CONTEXT_USER) {
instrumentation_begin();
vtime_user_exit(current);
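
For callers, a minimal usage sketch (illustrative only: do_idle_sketch() and the
arch_cpu_idle() call below are placeholders, not part of this commit). With
rcu_idle_enter()/rcu_idle_exit() and rcu_user_enter()/rcu_user_exit() gone, code
enters and leaves the RCU extended quiescent state through the context-tracking
entry points:

  static void do_idle_sketch(void)
  {
          ct_idle_enter();        /* CPU is RCU-idle: no RCU read-side use from here on */
          arch_cpu_idle();        /* placeholder for the low-power wait */
          ct_idle_exit();         /* RCU read-side critical sections are legal again */
  }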
