Merge branches 'doc.2023.01.05a', 'fixes.2023.01.23a', 'kvfree.2023.01.03a', 'srcu.2023.01.03a', 'srcu-always.2023.02.02a', 'tasks.2023.01.03a', 'torture.2023.01.05a' and 'torturescript.2023.01.03a' into HEAD

doc.2023.01.05a: Documentation update.
fixes.2023.01.23a: Miscellaneous fixes.
kvfree.2023.01.03a: kvfree_rcu() updates.
srcu.2023.01.03a: SRCU updates.
srcu-always.2023.02.02a: Finish making SRCU be unconditionally available.
tasks.2023.01.03a: Tasks-RCU updates.
torture.2023.01.05a: Torture-test updates.
torturescript.2023.01.03a: Torture-test scripting updates.
Paul E. McKenney committed Feb 3, 2023
8 parents 5e013dc + cf7066b + 608723c + dafc4d1 + 5634469 + a4fcfbe + d52d3a2 + 5a6cd56 commit 8e1704b
Showing 39 changed files with 1,010 additions and 506 deletions.
5 changes: 5 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -5113,6 +5113,11 @@
rcupdate.rcu_cpu_stall_timeout to be used (after
conversion from seconds to milliseconds).

+rcupdate.rcu_exp_stall_task_details= [KNL]
+Print stack dumps of any tasks blocking the
+current expedited RCU grace period during an
+expedited RCU CPU stall warning.
+
rcupdate.rcu_expedited= [KNL]
Use expedited grace-period primitives, for
example, synchronize_rcu_expedited() instead
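
This new diagnostic appears to be a boolean rcupdate module parameter, so on a typical configuration it would presumably be enabled by appending rcupdate.rcu_exp_stall_task_details=1 to the kernel command line; the "=1" form and the boolean nature are assumptions here, since the hunk above supplies only the parameter name and its description. Once enabled, an expedited RCU CPU stall warning should additionally dump the stacks of the tasks blocking the current expedited grace period.
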
42 changes: 0 additions & 42 deletions drivers/base/core.c
@@ -181,7 +181,6 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);

-#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

@@ -220,47 +219,6 @@ static void device_link_remove_from_lists(struct device_link *link)
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
}
-#else /* !CONFIG_SRCU */
-static DECLARE_RWSEM(device_links_lock);
-
-static inline void device_links_write_lock(void)
-{
-down_write(&device_links_lock);
-}
-
-static inline void device_links_write_unlock(void)
-{
-up_write(&device_links_lock);
-}
-
-int device_links_read_lock(void)
-{
-down_read(&device_links_lock);
-return 0;
-}
-
-void device_links_read_unlock(int not_used)
-{
-up_read(&device_links_lock);
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int device_links_read_lock_held(void)
-{
-return lockdep_is_held(&device_links_lock);
-}
-#endif
-
-static inline void device_link_synchronize_removal(void)
-{
-}
-
-static void device_link_remove_from_lists(struct device_link *link)
-{
-list_del(&link->s_node);
-list_del(&link->c_node);
-}
-#endif /* !CONFIG_SRCU */

static bool device_is_ancestor(struct device *dev, struct device *target)
{
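
With the CONFIG_SRCU fallback above removed, driver core (and every other user touched by this series) can rely on SRCU being built in unconditionally. As a reminder of the pattern the retained code depends on, here is a minimal sketch of the generic SRCU read/update API; the names example_srcu, example_read() and example_update() are invented, and this is not the exact driver-core implementation (which sits in the elided part of the hunk):

        /* Assumes <linux/srcu.h>. */
        DEFINE_STATIC_SRCU(example_srcu);

        static void example_read(void)
        {
                int idx;

                idx = srcu_read_lock(&example_srcu);    /* enter SRCU read-side critical section */
                /* ... dereference SRCU-protected pointers here ... */
                srcu_read_unlock(&example_srcu, idx);   /* exit, handing back the cookie */
        }

        static void example_update(void)
        {
                /* ... unlink the old data from the readers' view ... */
                synchronize_srcu(&example_srcu);        /* wait for all pre-existing readers */
                /* ... now it is safe to free the old data ... */
        }
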
1 change: 0 additions & 1 deletion drivers/dax/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig DAX
tristate "DAX: direct access to differentiated memory"
-select SRCU
default m if NVDIMM_DAX

if DAX
1 change: 0 additions & 1 deletion drivers/hwtracing/stm/Kconfig
@@ -2,7 +2,6 @@
config STM
tristate "System Trace Module devices"
select CONFIGFS_FS
-select SRCU
help
A System Trace Module (STM) is a device exporting data in System
Trace Protocol (STP) format as defined by MIPI STP standards.
1 change: 0 additions & 1 deletion drivers/md/Kconfig
@@ -6,7 +6,6 @@
menuconfig MD
bool "Multiple devices driver support (RAID and LVM)"
depends on BLOCK
-select SRCU
help
Support multiple physical spindles through a single logical device.
Required for RAID and logical volume management.
1 change: 0 additions & 1 deletion drivers/net/Kconfig
@@ -334,7 +334,6 @@ config NETCONSOLE_DYNAMIC

config NETPOLL
def_bool NETCONSOLE
-select SRCU

config NET_POLL_CONTROLLER
def_bool NETPOLL
2 changes: 1 addition & 1 deletion drivers/pci/controller/Kconfig
@@ -258,7 +258,7 @@ config PCIE_MEDIATEK_GEN3
MediaTek SoCs.

config VMD
-depends on PCI_MSI && X86_64 && SRCU && !UML
+depends on PCI_MSI && X86_64 && !UML
tristate "Intel Volume Management Device Driver"
help
Adds support for the Intel Volume Management Device (VMD). VMD is a
1 change: 0 additions & 1 deletion fs/btrfs/Kconfig
@@ -17,7 +17,6 @@ config BTRFS_FS
select FS_IOMAP
select RAID6_PQ
select XOR_BLOCKS
-select SRCU
depends on PAGE_SIZE_LESS_THAN_256KB

help
25 changes: 0 additions & 25 deletions fs/locks.c
@@ -1889,7 +1889,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
}
EXPORT_SYMBOL(generic_setlease);

-#if IS_ENABLED(CONFIG_SRCU)
/*
* Kernel subsystems can register to be notified on any attempt to set
* a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
@@ -1923,30 +1922,6 @@ void lease_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

-#else /* !IS_ENABLED(CONFIG_SRCU) */
-static inline void
-lease_notifier_chain_init(void)
-{
-}
-
-static inline void
-setlease_notifier(long arg, struct file_lock *lease)
-{
-}
-
-int lease_register_notifier(struct notifier_block *nb)
-{
-return 0;
-}
-EXPORT_SYMBOL_GPL(lease_register_notifier);
-
-void lease_unregister_notifier(struct notifier_block *nb)
-{
-}
-EXPORT_SYMBOL_GPL(lease_unregister_notifier);
-
-#endif /* IS_ENABLED(CONFIG_SRCU) */
-
/**
* vfs_setlease - sets a lease on an open file
* @filp: file pointer
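
With the stub implementations above removed, lease_register_notifier() and lease_unregister_notifier() are always the real, SRCU-backed versions. A rough sketch of how a subsystem such as nfsd might hook in follows; the callback name, its body, and the assumption that the notifier's data argument is the struct file_lock being set are illustrative, with only the register/unregister entry points taken from the code above:

        /* Assumes <linux/fs.h>, <linux/notifier.h> and <linux/init.h>. */
        static int example_lease_cb(struct notifier_block *nb,
                                    unsigned long arg, void *data)
        {
                struct file_lock *lease = data; /* assumed payload of the notification */

                /* React to the attempted lease, e.g. recall a conflicting delegation. */
                (void)lease;
                return NOTIFY_OK;
        }

        static struct notifier_block example_lease_nb = {
                .notifier_call = example_lease_cb,
        };

        static int __init example_init(void)
        {
                return lease_register_notifier(&example_lease_nb);
        }

        static void __exit example_exit(void)
        {
                lease_unregister_notifier(&example_lease_nb);
        }
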
1 change: 0 additions & 1 deletion fs/notify/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config FSNOTIFY
def_bool n
-select SRCU

source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
1 change: 0 additions & 1 deletion fs/quota/Kconfig
@@ -6,7 +6,6 @@
config QUOTA
bool "Quota support"
select QUOTACTL
-select SRCU
help
If you say Y here, you will be able to set per user limits for disk
usage (also called disk quotas). Currently, it works for the
2 changes: 1 addition & 1 deletion include/linux/rculist_nulls.h
@@ -139,7 +139,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
if (last) {
n->next = last->next;
n->pprev = &last->next;
-rcu_assign_pointer(hlist_next_rcu(last), n);
+rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
}
19 changes: 15 additions & 4 deletions include/linux/rcupdate.h
@@ -238,6 +238,7 @@ void synchronize_rcu_tasks_rude(void);

#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
@@ -246,6 +247,7 @@ void exit_tasks_rcu_finish(void);
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

@@ -374,11 +376,18 @@ static inline int debug_lockdep_rcu_enabled(void)
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
+ *
+ * This checks debug_lockdep_rcu_enabled() before checking (c) to
+ * prevent early boot splats due to lockdep not yet being initialized,
+ * and rechecks it after checking (c) to prevent false-positive splats
+ * due to races with lockdep being disabled. See commit 3066820034b5dd
+ * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(".data.unlikely") __warned; \
-if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
+if (debug_lockdep_rcu_enabled() && (c) && \
+debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
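
The kernel-doc added above documents why debug_lockdep_rcu_enabled() is now checked on both sides of the condition. For reference, a typical caller looks roughly like the following sketch; the function, the condition, and the message are invented for illustration, while the macro is the one in the hunk:

        /* Assumes <linux/rcupdate.h>. */
        static void example_reader_check(void)
        {
                RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                                 "example: accessed outside of an RCU read-side critical section");
                /* ... rcu_dereference() calls that rely on rcu_read_lock() go here ... */
        }
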
@@ -1004,15 +1013,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)

+#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+#define kfree_rcu_mightsleep(ptr) kvfree_rcu_mightsleep(ptr)
+
#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
#define kvfree_rcu_arg_2(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
\
if (___p) { \
BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
-kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \
-(offsetof(typeof(*(ptr)), rhf))); \
+kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
} \
} while (0)

@@ -1021,7 +1032,7 @@ do { \
typeof(ptr) ___p = (ptr); \
\
if (___p) \
-kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
+kvfree_call_rcu(NULL, (void *) (___p)); \
} while (0)

/*
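
To make the two calling forms concrete, here is a hedged sketch of typical usage; struct example and its fields are invented, while kvfree_rcu() and the new kvfree_rcu_mightsleep() are the macros defined above:

        /* Assumes <linux/rcupdate.h> and <linux/slab.h>. */
        struct example {
                int payload;
                struct rcu_head rh;     /* needed only by the two-argument form */
        };

        static void example_free_atomic_safe(struct example *p)
        {
                /* Two-argument form: usable from atomic context; the embedded
                 * rcu_head and the object pointer go to kvfree_call_rcu(), as in
                 * the updated kvfree_rcu_arg_2() above.
                 */
                kvfree_rcu(p, rh);
        }

        static void example_free_may_sleep(struct example *p)
        {
                /* Single-argument form: no rcu_head needed, but it may sleep,
                 * which the new *_mightsleep names now make explicit.
                 */
                kvfree_rcu_mightsleep(p);
        }
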
12 changes: 6 additions & 6 deletions include/linux/rcutiny.h
@@ -98,25 +98,25 @@ static inline void synchronize_rcu_expedited(void)
*/
extern void kvfree(const void *addr);

-static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
if (head) {
-call_rcu(head, func);
+call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
return;
}

// kvfree_rcu(one_arg) call.
might_sleep();
synchronize_rcu();
-kvfree((void *) func);
+kvfree(ptr);
}

#ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
-__kvfree_call_rcu(head, func);
+__kvfree_call_rcu(head, ptr);
}
#endif

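
The Tiny RCU path above no longer receives the rcu_head offset directly; it reconstructs it as the distance between the embedded rcu_head and the start of the object. A small illustration of the identity relied on by the call_rcu() line above; struct example and the helper are invented:

        /* Assumes <linux/rcupdate.h>, <linux/bug.h> and <linux/stddef.h>. */
        struct example {
                int payload;
                struct rcu_head rh;
        };

        static void example_offset_identity(struct example *p)
        {
                void *ptr = p;                  /* the object pointer now passed around */
                struct rcu_head *head = &p->rh; /* the rcu_head embedded inside it */

                /* (void *)head - ptr is exactly the field offset, so encoding it as
                 * a fake rcu_callback_t preserves the information the old
                 * offset-based interface carried.
                 */
                WARN_ON((size_t)((void *)head - ptr) != offsetof(struct example, rh));
        }
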
2 changes: 1 addition & 1 deletion include/linux/rcutree.h
@@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(void)
}

void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);

void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
45 changes: 45 additions & 0 deletions include/linux/srcu.h
@@ -214,6 +214,34 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
return retval;
}

+/**
+ * srcu_down_read - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter a semaphore-like SRCU read-side critical section. Note that
+ * SRCU read-side critical sections may be nested. However, it is
+ * illegal to call anything that waits on an SRCU grace period for the
+ * same srcu_struct, whether directly or indirectly. Please note that
+ * one way to indirectly wait on an SRCU grace period is to acquire
+ * a mutex that is held elsewhere while calling synchronize_srcu() or
+ * synchronize_srcu_expedited(). But if you want lockdep to help you
+ * keep this stuff straight, you should instead use srcu_read_lock().
+ *
+ * The semaphore-like nature of srcu_down_read() means that the matching
+ * srcu_up_read() can be invoked from some other context, for example,
+ * from some other task or from an irq handler. However, neither
+ * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
+ *
+ * Calls to srcu_down_read() may be nested, similar to the manner in
+ * which calls to down_read() may be nested.
+ */
+static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+{
+WARN_ON_ONCE(in_nmi());
+srcu_check_nmi_safety(ssp, false);
+return __srcu_read_lock(ssp);
+}
+
/**
* srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
@@ -254,6 +282,23 @@ srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
__srcu_read_unlock(ssp, idx);
}

+/**
+ * srcu_up_read - unregister a old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section, but not necessarily from
+ * the same context as the maching srcu_down_read().
+ */
+static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
+__releases(ssp)
+{
+WARN_ON_ONCE(idx & ~0x1);
+WARN_ON_ONCE(in_nmi());
+srcu_check_nmi_safety(ssp, false);
+__srcu_read_unlock(ssp, idx);
+}
+
/**
* smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
*
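
The comments above stress that srcu_up_read() may run in a different context than the matching srcu_down_read(). A hedged sketch of such a hand-off, passing the returned index to a workqueue item; the srcu_struct, structure, and function names are invented, while the down/up primitives are the ones added above:

        /* Assumes <linux/srcu.h>, <linux/workqueue.h> and <linux/slab.h>. */
        DEFINE_STATIC_SRCU(example_srcu);

        struct example_work {
                struct work_struct work;
                int srcu_idx;           /* cookie returned by srcu_down_read() */
        };

        static void example_worker(struct work_struct *w)
        {
                struct example_work *ew = container_of(w, struct example_work, work);

                /* ... finish using the SRCU-protected data ... */
                srcu_up_read(&example_srcu, ew->srcu_idx);      /* released from another task */
                kfree(ew);
        }

        static int example_start(void)
        {
                struct example_work *ew = kzalloc(sizeof(*ew), GFP_KERNEL);

                if (!ew)
                        return -ENOMEM;
                ew->srcu_idx = srcu_down_read(&example_srcu);   /* acquired here ... */
                INIT_WORK(&ew->work, example_worker);
                schedule_work(&ew->work);       /* ... and released later in example_worker() */
                return 0;
        }
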
2 changes: 1 addition & 1 deletion include/linux/srcutree.h
@@ -49,7 +49,7 @@ struct srcu_data {
struct srcu_node {
spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children having CBs, but only */
-/* if greater than ->srcu_gq_seq. */
+/* if greater than ->srcu_gp_seq. */
unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs have CBs for given GP? */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
struct srcu_node *srcu_parent; /* Next up in tree. */
1 change: 0 additions & 1 deletion init/Kconfig
@@ -1865,7 +1865,6 @@ config PERF_EVENTS
default y if PROFILING
depends on HAVE_PERF_EVENTS
select IRQ_WORK
-select SRCU
help
Enable kernel support for various performance events provided
by software and hardware.
