From aa0fdccda4076c81d07a0c0b05602ee2aa17a2be Mon Sep 17 00:00:00 2001
From: Joel Granados
Date: Wed, 22 Jan 2025 11:33:49 +0100
Subject: [PATCH 01/32] tests/module: nix-ify

Use "#!/usr/bin/env bash" instead of "#!/bin/bash". This is necessary for
nix environments, as they provide /usr/bin/env only at the standard
location.

Signed-off-by: Joel Granados
Acked-by: Luis Chamberlain
Link: https://lore.kernel.org/r/20250122-jag-nix-ify-v1-1-addb3170f93c@kernel.org
Signed-off-by: Petr Pavlu
---
 lib/tests/module/gen_test_kallsyms.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/tests/module/gen_test_kallsyms.sh b/lib/tests/module/gen_test_kallsyms.sh
index 561dcac0f359c..31fe4ed63de83 100755
--- a/lib/tests/module/gen_test_kallsyms.sh
+++ b/lib/tests/module/gen_test_kallsyms.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

 TARGET=$(basename $1)
 DIR=lib/tests/module

From 838e6dd8b59262d0b59041d65d21331daa83dd36 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 8 Jan 2025 10:04:31 +0100
Subject: [PATCH 02/32] module: Begin to move from RCU-sched to RCU.

The RCU usage in module was introduced in commit d72b37513cdfb ("Remove
stop_machine during module load v2") and it was claimed not to be RCU
but something similar. Then there was another improvement in commit
e91defa26c527 ("module: don't use stop_machine on module load"). It
became a mix of RCU and RCU-sched and was eventually fixed in commit
0be964be0d450 ("module: Sanitize RCU usage and locking"). Later, RCU and
RCU-sched were merged in commit cb2f55369d3a9 ("modules: Replace
synchronize_sched() and call_rcu_sched()"), so the two were aligned.

Looking at it today, there are still leftovers: preempt_disable() is
used instead of rcu_read_lock_sched(), and the RCU/RCU-sched merge is
not complete because there is still rcu_dereference_sched() for
module::kallsyms.

The RCU lists modules and unloaded_tainted_modules are always accessed
under RCU protection or the module_mutex. Iterating the modules list is
always safe because the module cannot disappear: once a module is
removed (free_module()), a synchronize_rcu() follows the removal from
the list and waits until every RCU reader has left its read-side
section. That means iterating over the list within an RCU read section
is enough; there is no need to disable preemption.

module::kallsyms is first assigned in add_kallsyms() before the module
is added to the list. At this point, it points to init data. This
pointer is later updated, and before the init code is removed there is
also a synchronize_rcu() in do_free_init(). That means an RCU read lock
is enough for protection and rcu_dereference() can be used safely.

Convert the module code and its users step by step. Update comments and
convert print_modules() to use RCU.

Signed-off-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20250108090457.512198-3-bigeasy@linutronix.de
Signed-off-by: Petr Pavlu
---
 kernel/module/main.c        | 9 ++++-----
 kernel/module/tree_lookup.c | 8 ++++----
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/kernel/module/main.c b/kernel/module/main.c
index 1fb9ad289a6f8..5f661d5343ac7 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -67,7 +67,7 @@

 /*
  * Mutex protects:
- * 1) List of modules (also safely readable with preempt_disable),
+ * 1) List of modules (also safely readable within RCU read section),
  * 2) module_use links,
  * 3) mod_tree.addr_min/mod_tree.addr_max.
  * (delete and add uses RCU list operations).
@@ -1348,7 +1348,7 @@ static void free_module(struct module *mod) mod_tree_remove(mod); /* Remove this module from bug list, this uses list_del_rcu */ module_bug_cleanup(mod); - /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ + /* Wait for RCU synchronizing before releasing mod->list and buglist. */ synchronize_rcu(); if (try_add_tainted_module(mod)) pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", @@ -3049,7 +3049,7 @@ static noinline int do_init_module(struct module *mod) #endif /* * We want to free module_init, but be aware that kallsyms may be - * walking this with preempt disabled. In all the failure paths, we + * walking this within an RCU read section. In all the failure paths, we * call synchronize_rcu(), but we don't want to slow down the success * path. execmem_free() cannot be called in an interrupt, so do the * work and call synchronize_rcu() in a work queue. @@ -3836,7 +3836,7 @@ void print_modules(void) printk(KERN_DEFAULT "Modules linked in:"); /* Most callers should already have preempt disabled, but make sure */ - preempt_disable(); + guard(rcu)(); list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; @@ -3844,7 +3844,6 @@ void print_modules(void) } print_unloaded_tainted_modules(); - preempt_enable(); if (last_unloaded_module.name[0]) pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, last_unloaded_module.taints); diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c index 277197977d438..d3204c5c74eb7 100644 --- a/kernel/module/tree_lookup.c +++ b/kernel/module/tree_lookup.c @@ -12,11 +12,11 @@ /* * Use a latched RB-tree for __module_address(); this allows us to use - * RCU-sched lookups of the address from any context. + * RCU lookups of the address from any context. * - * This is conditional on PERF_EVENTS || TRACING because those can really hit - * __module_address() hard by doing a lot of stack unwinding; potentially from - * NMI context. + * This is conditional on PERF_EVENTS || TRACING || CFI_CLANG because those can + * really hit __module_address() hard by doing a lot of stack unwinding; + * potentially from NMI context. */ static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) From 039de46874bb08fef027342c9c5077f2ea85ab42 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:32 +0100 Subject: [PATCH 03/32] module: Use proper RCU assignment in add_kallsyms(). add_kallsyms() assigns the RCU pointer module::kallsyms and setups the structures behind it which point to init-data. The module was not published yet, nothing can see the kallsyms pointer and the data behind it. Also module's init function was not yet invoked. There is no need to use rcu_dereference() here, it is just to keep checkers quiet. The whole RCU read section is also not needed. Use a local kallsyms pointer and setup the data structures. Assign that pointer to the data structure at the end via rcu_assign_pointer(). 
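In outline, the publication pattern this moves to looks as follows (a
simplified sketch of the change below, with unrelated fields omitted):

	struct mod_kallsyms *kallsyms;

	/* Fill in the structure through a plain local pointer first ... */
	kallsyms = init_data_base + info->mod_kallsyms_init_off;
	kallsyms->symtab = (void *)symsec->sh_addr;
	kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);

	/* ... and only publish it once it is fully set up. */
	rcu_assign_pointer(mod->kallsyms, kallsyms);

rcu_assign_pointer() orders the initialisation before the pointer becomes
visible, so an RCU reader that later picks it up via rcu_dereference()
never sees a half-initialised structure.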
Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-4-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/module/kallsyms.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index bf65e0c3c86fc..45846ae4042d1 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -177,19 +177,15 @@ void add_kallsyms(struct module *mod, const struct load_info *info) unsigned long strtab_size; void *data_base = mod->mem[MOD_DATA].base; void *init_data_base = mod->mem[MOD_INIT_DATA].base; + struct mod_kallsyms *kallsyms; - /* Set up to point into init section. */ - mod->kallsyms = (void __rcu *)init_data_base + - info->mod_kallsyms_init_off; + kallsyms = init_data_base + info->mod_kallsyms_init_off; - rcu_read_lock(); - /* The following is safe since this pointer cannot change */ - rcu_dereference(mod->kallsyms)->symtab = (void *)symsec->sh_addr; - rcu_dereference(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + kallsyms->symtab = (void *)symsec->sh_addr; + kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); /* Make sure we get permanent strtab: don't use info->strtab. */ - rcu_dereference(mod->kallsyms)->strtab = - (void *)info->sechdrs[info->index.str].sh_addr; - rcu_dereference(mod->kallsyms)->typetab = init_data_base + info->init_typeoffs; + kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + kallsyms->typetab = init_data_base + info->init_typeoffs; /* * Now populate the cut down core kallsyms for after init @@ -199,20 +195,19 @@ void add_kallsyms(struct module *mod, const struct load_info *info) mod->core_kallsyms.strtab = s = data_base + info->stroffs; mod->core_kallsyms.typetab = data_base + info->core_typeoffs; strtab_size = info->core_typeoffs - info->stroffs; - src = rcu_dereference(mod->kallsyms)->symtab; - for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) { - rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info); + src = kallsyms->symtab; + for (ndst = i = 0; i < kallsyms->num_symtab; i++) { + kallsyms->typetab[i] = elf_type(src + i, info); if (i == 0 || is_livepatch_module(mod) || is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum, info->index.pcpu)) { ssize_t ret; mod->core_kallsyms.typetab[ndst] = - rcu_dereference(mod->kallsyms)->typetab[i]; + kallsyms->typetab[i]; dst[ndst] = src[i]; dst[ndst++].st_name = s - mod->core_kallsyms.strtab; - ret = strscpy(s, - &rcu_dereference(mod->kallsyms)->strtab[src[i].st_name], + ret = strscpy(s, &kallsyms->strtab[src[i].st_name], strtab_size); if (ret < 0) break; @@ -220,7 +215,9 @@ void add_kallsyms(struct module *mod, const struct load_info *info) strtab_size -= ret + 1; } } - rcu_read_unlock(); + + /* Set up to point into init section. */ + rcu_assign_pointer(mod->kallsyms, kallsyms); mod->core_kallsyms.num_symtab = ndst; } From f01369239293e4900af7be673e0fd26fe54f85e1 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:33 +0100 Subject: [PATCH 04/32] module: Use RCU in find_kallsyms_symbol(). The modules list and module::kallsyms can be accessed under RCU assumption. Use rcu_dereference() to reference the kallsyms pointer in find_kallsyms_symbol(). Use a RCU section instead of preempt_disable in callers of find_kallsyms_symbol(). Keep the preempt-disable in module_address_lookup() due to __module_address(). 
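The shape of this and the following conversions is roughly (a sketch,
argument lists simplified, assuming the usual guard definitions from
<linux/rcupdate.h>/<linux/cleanup.h>):

	/* before */
	preempt_disable();
	sym = find_kallsyms_symbol(mod, addr, size, offset);
	preempt_enable();

	/* after: the scope-based guard drops the RCU read lock on exit */
	guard(rcu)();
	sym = find_kallsyms_symbol(mod, addr, size, offset);

guard(rcu)() is the cleanup-attribute form of
rcu_read_lock()/rcu_read_unlock(), so early returns in the callers no
longer need explicit unlock/enable calls.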
Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-5-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/module/kallsyms.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index 45846ae4042d1..3f59d04795572 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -257,7 +257,7 @@ static const char *find_kallsyms_symbol(struct module *mod, { unsigned int i, best = 0; unsigned long nextval, bestval; - struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms); struct module_memory *mod_mem; /* At worse, next value is at end of module */ @@ -329,6 +329,7 @@ int module_address_lookup(unsigned long addr, int ret = 0; struct module *mod; + guard(rcu)(); preempt_disable(); mod = __module_address(addr); if (mod) { @@ -356,7 +357,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname) { struct module *mod; - preempt_disable(); + guard(rcu)(); list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; @@ -368,12 +369,10 @@ int lookup_module_symbol_name(unsigned long addr, char *symname) goto out; strscpy(symname, sym, KSYM_NAME_LEN); - preempt_enable(); return 0; } } out: - preempt_enable(); return -ERANGE; } From c4fadf38ded553edd6ea78ec6cc819e8fdd6f9d3 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:34 +0100 Subject: [PATCH 05/32] module: Use RCU in module_get_kallsym(). The modules list and module::kallsyms can be accessed under RCU assumption. Iterate the modules with RCU protection, use rcu_dereference() to access the kallsyms pointer. Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-6-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/module/kallsyms.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index 3f59d04795572..4eef518204eb5 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -381,13 +381,13 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, { struct module *mod; - preempt_disable(); + guard(rcu)(); list_for_each_entry_rcu(mod, &modules, list) { struct mod_kallsyms *kallsyms; if (mod->state == MODULE_STATE_UNFORMED) continue; - kallsyms = rcu_dereference_sched(mod->kallsyms); + kallsyms = rcu_dereference(mod->kallsyms); if (symnum < kallsyms->num_symtab) { const Elf_Sym *sym = &kallsyms->symtab[symnum]; @@ -396,12 +396,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, strscpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); strscpy(module_name, mod->name, MODULE_NAME_LEN); *exported = is_exported(name, *value, mod); - preempt_enable(); return 0; } symnum -= kallsyms->num_symtab; } - preempt_enable(); return -ERANGE; } From febaa65c94e0db795ec52c45bf7ede524cdce63c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:35 +0100 Subject: [PATCH 06/32] module: Use RCU in find_module_all(). The modules list and module::kallsyms can be accessed under RCU assumption. Remove module_assert_mutex_or_preempt() from find_module_all() so it can be used under RCU protection without warnings. Update its callers to use RCU protection instead of preempt_disable(). 
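Dropping the explicit assertion is safe because the RCU list iterator
already performs an equivalent check; the loop in find_module_all() is
essentially (sketch, loop body trimmed):

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		/* name comparison follows */
	}

With CONFIG_PROVE_RCU_LIST enabled, list_for_each_entry_rcu() warns
unless it runs inside an RCU read-side section or the supplied lockdep
condition (module_mutex held) is true, which covers both legitimate
callers.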
Cc: Jiri Kosina Cc: Joe Lawrence Cc: Josh Poimboeuf Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Miroslav Benes Cc: Petr Mladek Cc: Steven Rostedt Cc: linux-trace-kernel@vger.kernel.org Cc: live-patching@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Reviewed-by: Petr Mladek Link: https://lore.kernel.org/r/20250108090457.512198-7-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- include/linux/module.h | 2 +- kernel/livepatch/core.c | 4 +--- kernel/module/kallsyms.c | 1 + kernel/module/main.c | 6 ++---- kernel/trace/trace_kprobe.c | 9 +++------ 5 files changed, 8 insertions(+), 14 deletions(-) diff --git a/include/linux/module.h b/include/linux/module.h index 30e5b19bafa98..0516f5ea9153b 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -666,7 +666,7 @@ static inline bool within_module(unsigned long addr, const struct module *mod) return within_module_init(addr, mod) || within_module_core(addr, mod); } -/* Search for module by name: must be in a RCU-sched critical section. */ +/* Search for module by name: must be in a RCU critical section. */ struct module *find_module(const char *name); extern void __noreturn __module_put_and_kthread_exit(struct module *mod, diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 0cd39954d5a10..abea193977d21 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -59,7 +59,7 @@ static void klp_find_object_module(struct klp_object *obj) if (!klp_is_module(obj)) return; - rcu_read_lock_sched(); + guard(rcu)(); /* * We do not want to block removal of patched modules and therefore * we do not take a reference here. The patches are removed by @@ -75,8 +75,6 @@ static void klp_find_object_module(struct klp_object *obj) */ if (mod && mod->klp_alive) obj->mod = mod; - - rcu_read_unlock_sched(); } static bool klp_initialized(void) diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index 4eef518204eb5..3cba9f933b24f 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -450,6 +450,7 @@ unsigned long module_kallsyms_lookup_name(const char *name) unsigned long ret; /* Don't lock: we're in enough trouble already. */ + guard(rcu)(); preempt_disable(); ret = __module_kallsyms_lookup_name(name); preempt_enable(); diff --git a/kernel/module/main.c b/kernel/module/main.c index 5f661d5343ac7..cdd403b940b0d 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -374,16 +374,14 @@ bool find_symbol(struct find_symbol_arg *fsa) } /* - * Search for module by name: must hold module_mutex (or preempt disabled - * for read-only access). + * Search for module by name: must hold module_mutex (or RCU for read-only + * access). 
*/ struct module *find_module_all(const char *name, size_t len, bool even_unformed) { struct module *mod; - module_assert_mutex_or_preempt(); - list_for_each_entry_rcu(mod, &modules, list, lockdep_is_held(&module_mutex)) { if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d8d5f18a141ad..48057531ee4eb 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -124,9 +124,8 @@ static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk) if (!p) return true; *p = '\0'; - rcu_read_lock_sched(); - ret = !!find_module(tk->symbol); - rcu_read_unlock_sched(); + scoped_guard(rcu) + ret = !!find_module(tk->symbol); *p = ':'; return ret; @@ -796,12 +795,10 @@ static struct module *try_module_get_by_name(const char *name) { struct module *mod; - rcu_read_lock_sched(); + guard(rcu)(); mod = find_module(name); if (mod && !try_module_get(mod)) mod = NULL; - rcu_read_unlock_sched(); - return mod; } #else From f27d8ed6a636b3dc5697e1a3427fd7a243112e81 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:36 +0100 Subject: [PATCH 07/32] module: Use RCU in __find_kallsyms_symbol_value(). module::kallsyms can be accessed under RCU assumption. Use rcu_dereference() to access module::kallsyms. Update callers. Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-8-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/module/kallsyms.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index 3cba9f933b24f..e3c55bc879c11 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -407,7 +407,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, static unsigned long __find_kallsyms_symbol_value(struct module *mod, const char *name) { unsigned int i; - struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); + struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms); for (i = 0; i < kallsyms->num_symtab; i++) { const Elf_Sym *sym = &kallsyms->symtab[i]; @@ -447,24 +447,15 @@ static unsigned long __module_kallsyms_lookup_name(const char *name) /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name) { - unsigned long ret; - /* Don't lock: we're in enough trouble already. */ guard(rcu)(); - preempt_disable(); - ret = __module_kallsyms_lookup_name(name); - preempt_enable(); - return ret; + return __module_kallsyms_lookup_name(name); } unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) { - unsigned long ret; - - preempt_disable(); - ret = __find_kallsyms_symbol_value(mod, name); - preempt_enable(); - return ret; + guard(rcu)(); + return __find_kallsyms_symbol_value(mod, name); } int module_kallsyms_on_each_symbol(const char *modname, From cdd9335c1302dab190cafbd4dff9697b39b8aa6f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:37 +0100 Subject: [PATCH 08/32] module: Use RCU in module_kallsyms_on_each_symbol(). module::kallsyms can be accessed under RCU assumption. Use rcu_dereference() to access module::kallsyms. Update callers. 
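The accessor the hunk below switches to can be sketched as:

	kallsyms = rcu_dereference_check(mod->kallsyms,
					 lockdep_is_held(&module_mutex));

rcu_dereference_check() behaves like rcu_dereference(), but its lockdep
expression also accepts callers that hold module_mutex instead of the
RCU read lock, so both kinds of callers stay lockdep- and sparse-clean
without the old preempt_disable()/rcu_dereference_sched() dance.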
Signed-off-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20250108090457.512198-9-bigeasy@linutronix.de
Signed-off-by: Petr Pavlu
---
 kernel/module/kallsyms.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
index e3c55bc879c11..0e8ec6486d95c 100644
--- a/kernel/module/kallsyms.c
+++ b/kernel/module/kallsyms.c
@@ -476,10 +476,8 @@ int module_kallsyms_on_each_symbol(const char *modname,
 		if (modname && strcmp(modname, mod->name))
 			continue;

-		/* Use rcu_dereference_sched() to remain compliant with the sparse tool */
-		preempt_disable();
-		kallsyms = rcu_dereference_sched(mod->kallsyms);
-		preempt_enable();
+		kallsyms = rcu_dereference_check(mod->kallsyms,
+						 lockdep_is_held(&module_mutex));

 		for (i = 0; i < kallsyms->num_symtab; i++) {
 			const Elf_Sym *sym = &kallsyms->symtab[i];

From 435bbcc3bef602ce3920382f339d3cd65e46ad23 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 8 Jan 2025 10:04:38 +0100
Subject: [PATCH 09/32] module: Remove module_assert_mutex_or_preempt() from
 try_add_tainted_module().

module_assert_mutex_or_preempt() is not needed in
try_add_tainted_module(). The function checks for RCU-sched or the
module_mutex to be acquired. The list_for_each_entry_rcu() below does
the same check.

Remove module_assert_mutex_or_preempt() from try_add_tainted_module().

Signed-off-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20250108090457.512198-10-bigeasy@linutronix.de
Signed-off-by: Petr Pavlu
---
 kernel/module/tracking.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/module/tracking.c b/kernel/module/tracking.c
index 16742d1c630c6..4fefec5b683c6 100644
--- a/kernel/module/tracking.c
+++ b/kernel/module/tracking.c
@@ -21,8 +21,6 @@ int try_add_tainted_module(struct module *mod)
 {
 	struct mod_unload_taint *mod_taint;

-	module_assert_mutex_or_preempt();
-
 	if (!mod->taints)
 		goto out;

From 2ff49f8931be8bed53e65993fd1af68e3cd747cb Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 8 Jan 2025 10:04:39 +0100
Subject: [PATCH 10/32] module: Use RCU in find_symbol().

module_assert_mutex_or_preempt() is not needed in find_symbol(). The
function checks for RCU-sched or the module_mutex to be acquired. The
list_for_each_entry_rcu() below does the same check.

Remove module_assert_mutex_or_preempt() from find_symbol(). Use RCU
protection to invoke find_symbol() and update callers.

Signed-off-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20250108090457.512198-11-bigeasy@linutronix.de
Signed-off-by: Petr Pavlu
---
 kernel/module/main.c    | 30 ++++++++++++------------------
 kernel/module/version.c | 14 +++++++-------
 2 files changed, 19 insertions(+), 25 deletions(-)

diff --git a/kernel/module/main.c b/kernel/module/main.c
index cdd403b940b0d..af7ca713eff0a 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -331,7 +331,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms,

 /*
  * Find an exported symbol and return it, along with, (optional) crc and
- * (optional) module which owns it. Needs preempt disabled or module_mutex.
+ * (optional) module which owns it. Needs RCU or module_mutex.
*/ bool find_symbol(struct find_symbol_arg *fsa) { @@ -345,8 +345,6 @@ bool find_symbol(struct find_symbol_arg *fsa) struct module *mod; unsigned int i; - module_assert_mutex_or_preempt(); - for (i = 0; i < ARRAY_SIZE(arr); i++) if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) return true; @@ -812,10 +810,9 @@ void __symbol_put(const char *symbol) .gplok = true, }; - preempt_disable(); + guard(rcu)(); BUG_ON(!find_symbol(&fsa)); module_put(fsa.owner); - preempt_enable(); } EXPORT_SYMBOL(__symbol_put); @@ -1369,21 +1366,18 @@ void *__symbol_get(const char *symbol) .warn = true, }; - preempt_disable(); - if (!find_symbol(&fsa)) - goto fail; - if (fsa.license != GPL_ONLY) { - pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", - symbol); - goto fail; + scoped_guard(rcu) { + if (!find_symbol(&fsa)) + return NULL; + if (fsa.license != GPL_ONLY) { + pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", + symbol); + return NULL; + } + if (strong_try_module_get(fsa.owner)) + return NULL; } - if (strong_try_module_get(fsa.owner)) - goto fail; - preempt_enable(); return (void *)kernel_symbol_value(fsa.sym); -fail: - preempt_enable(); - return NULL; } EXPORT_SYMBOL_GPL(__symbol_get); diff --git a/kernel/module/version.c b/kernel/module/version.c index 3718a88683219..2beefeba82d94 100644 --- a/kernel/module/version.c +++ b/kernel/module/version.c @@ -79,17 +79,17 @@ int check_modstruct_version(const struct load_info *info, .name = "module_layout", .gplok = true, }; + bool have_symbol; /* * Since this should be found in kernel (which can't be removed), no - * locking is necessary -- use preempt_disable() to placate lockdep. + * locking is necessary. Regardless use a RCU read section to keep + * lockdep happy. */ - preempt_disable(); - if (!find_symbol(&fsa)) { - preempt_enable(); - BUG(); - } - preempt_enable(); + scoped_guard(rcu) + have_symbol = find_symbol(&fsa); + BUG_ON(!have_symbol); + return check_version(info, "module_layout", mod, fsa.crc); } From 2bee017741f2890131da2fac96c3f2b64cb947b0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:40 +0100 Subject: [PATCH 11/32] module: Use RCU in __is_module_percpu_address(). The modules list can be accessed under RCU assumption. Use RCU protection instead preempt_disable(). Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-12-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/module/main.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/kernel/module/main.c b/kernel/module/main.c index af7ca713eff0a..5d6f44c09bec2 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -450,8 +450,7 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) struct module *mod; unsigned int cpu; - preempt_disable(); - + guard(rcu)(); list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; @@ -468,13 +467,10 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) per_cpu_ptr(mod->percpu, get_boot_cpu_id()); } - preempt_enable(); return true; } } } - - preempt_enable(); return false; } From 7d9dda6f628fde3d227a69bec6a82160577b79ca Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:41 +0100 Subject: [PATCH 12/32] module: Allow __module_address() to be called from RCU section. mod_find() uses either the modules list to find a module or a tree lookup (CONFIG_MODULES_TREE_LOOKUP). 
The list and the tree can both be iterated under RCU protection (as well
as RCU-sched). Remove module_assert_mutex_or_preempt() from
__module_address(), and remove the helper entirely since
__module_address() was its last user. Update comments.

Signed-off-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20250108090457.512198-13-bigeasy@linutronix.de
Signed-off-by: Petr Pavlu
---
 kernel/module/internal.h | 11 -----------
 kernel/module/main.c     |  4 +---
 2 files changed, 1 insertion(+), 14 deletions(-)

diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index d09b46ef032f0..626cf8668a7eb 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -124,17 +124,6 @@ char *module_next_tag_pair(char *string, unsigned long *secsize);
 #define for_each_modinfo_entry(entry, info, name) \
 	for (entry = get_modinfo(info, name); entry; entry = get_next_modinfo(info, name, entry))

-static inline void module_assert_mutex_or_preempt(void)
-{
-#ifdef CONFIG_LOCKDEP
-	if (unlikely(!debug_locks))
-		return;
-
-	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
-		     !lockdep_is_held(&module_mutex));
-#endif
-}
-
 static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
 {
 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS

diff --git a/kernel/module/main.c b/kernel/module/main.c
index 5d6f44c09bec2..2155814f35dd0 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -3749,7 +3749,7 @@ bool is_module_address(unsigned long addr)
  * __module_address() - get the module which contains an address.
  * @addr: the address.
  *
- * Must be called with preempt disabled or module mutex held so that
+ * Must be called within RCU read section or module mutex held so that
  * module doesn't get freed during this.
  */
 struct module *__module_address(unsigned long addr)
@@ -3767,8 +3767,6 @@ struct module *__module_address(unsigned long addr)
 		return NULL;

 lookup:
-	module_assert_mutex_or_preempt();
-
 	mod = mod_find(addr, &mod_tree);
 	if (mod) {
 		BUG_ON(!within_module(addr, mod));

From 2abf84f688bed9cb4d3208d9d538b3387d292972 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 8 Jan 2025 10:04:42 +0100
Subject: [PATCH 13/32] module: Use RCU in search_module_extables().

search_module_extables() returns an exception_table_entry belonging to a
module. The lookup via __module_address() can be performed with RCU
protection.

The returned exception_table_entry remains valid because the passed
address usually belongs to a module that is currently executing, so the
module cannot be removed because "something else" holds a reference to
it. Exceptions here are:
- kprobe, which acquires a reference on the module beforehand
- MCE, which invokes the function from within a timer; the RCU lifetime
  guarantees (of the timer) are sufficient

Therefore it is safe to return the exception_table_entry outside the RCU
section which provided the module.

Use RCU for the lookup in search_module_extables() and update the
comment.
Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-14-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/module/main.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/kernel/module/main.c b/kernel/module/main.c index 2155814f35dd0..aebb17e1bfb97 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3703,28 +3703,23 @@ char *module_flags(struct module *mod, char *buf, bool show_state) /* Given an address, look for it in the module exception tables. */ const struct exception_table_entry *search_module_extables(unsigned long addr) { - const struct exception_table_entry *e = NULL; struct module *mod; - preempt_disable(); + guard(rcu)(); mod = __module_address(addr); if (!mod) - goto out; + return NULL; if (!mod->num_exentries) - goto out; - - e = search_extable(mod->extable, - mod->num_exentries, - addr); -out: - preempt_enable(); - + return NULL; /* - * Now, if we found one, we are running inside it now, hence - * we cannot unload the module, hence no refcnt needed. + * The address passed here belongs to a module that is currently + * invoked (we are running inside it). Therefore its module::refcnt + * needs already be >0 to ensure that it is not removed at this stage. + * All other user need to invoke this function within a RCU read + * section. */ - return e; + return search_extable(mod->extable, mod->num_exentries, addr); } /** From 6593a2c990f251bce63ab8de0397405303e32381 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:43 +0100 Subject: [PATCH 14/32] module: Use RCU in all users of __module_address(). __module_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_address() with RCU. Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-15-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- include/linux/kallsyms.h | 3 +-- kernel/module/kallsyms.c | 5 +---- kernel/module/main.c | 9 ++------- 3 files changed, 4 insertions(+), 13 deletions(-) diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 1c6a6c1704d8d..d5dd54c53ace6 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -55,12 +55,11 @@ static inline void *dereference_symbol_descriptor(void *ptr) if (is_ksym_addr((unsigned long)ptr)) return ptr; - preempt_disable(); + guard(rcu)(); mod = __module_address((unsigned long)ptr); if (mod) ptr = dereference_module_function_descriptor(mod, ptr); - preempt_enable(); #endif return ptr; } diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c index 0e8ec6486d95c..00a60796327c0 100644 --- a/kernel/module/kallsyms.c +++ b/kernel/module/kallsyms.c @@ -316,7 +316,7 @@ void * __weak dereference_module_function_descriptor(struct module *mod, /* * For kallsyms to ask for address resolution. NULL means not found. Careful - * not to lock to avoid deadlock on oopses, simply disable preemption. + * not to lock to avoid deadlock on oopses, RCU is enough. 
*/ int module_address_lookup(unsigned long addr, unsigned long *size, @@ -330,7 +330,6 @@ int module_address_lookup(unsigned long addr, struct module *mod; guard(rcu)(); - preempt_disable(); mod = __module_address(addr); if (mod) { if (modname) @@ -348,8 +347,6 @@ int module_address_lookup(unsigned long addr, if (sym) ret = strscpy(namebuf, sym, KSYM_NAME_LEN); } - preempt_enable(); - return ret; } diff --git a/kernel/module/main.c b/kernel/module/main.c index aebb17e1bfb97..8a8b499730d86 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3731,13 +3731,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) */ bool is_module_address(unsigned long addr) { - bool ret; - - preempt_disable(); - ret = __module_address(addr) != NULL; - preempt_enable(); - - return ret; + guard(rcu)(); + return __module_address(addr) != NULL; } /** From d593e0cabdf6428560f311af0657642979b2c8ad Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:44 +0100 Subject: [PATCH 15/32] module: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-16-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/module/main.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/kernel/module/main.c b/kernel/module/main.c index 8a8b499730d86..81ecb9d3a9353 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -823,13 +823,12 @@ void symbol_put_addr(void *addr) /* * Even though we hold a reference on the module; we still need to - * disable preemption in order to safely traverse the data structure. + * RCU read section in order to safely traverse the data structure. */ - preempt_disable(); + guard(rcu)(); modaddr = __module_text_address(a); BUG_ON(!modaddr); module_put(modaddr); - preempt_enable(); } EXPORT_SYMBOL_GPL(symbol_put_addr); @@ -3776,20 +3775,15 @@ struct module *__module_address(unsigned long addr) */ bool is_module_text_address(unsigned long addr) { - bool ret; - - preempt_disable(); - ret = __module_text_address(addr) != NULL; - preempt_enable(); - - return ret; + guard(rcu)(); + return __module_text_address(addr) != NULL; } /** * __module_text_address() - get the module whose code contains an address. * @addr: the address. * - * Must be called with preempt disabled or module mutex held so that + * Must be called within RCU read section or module mutex held so that * module doesn't get freed during this. */ struct module *__module_text_address(unsigned long addr) From 59aa1414bf33bc706d6bc9c4dc2cd57b85862a4c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:45 +0100 Subject: [PATCH 16/32] ARM: module: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. 
Cc: Russell King Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-17-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- arch/arm/kernel/module-plts.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c index da2ee8d6ef1a7..354ce16d83cb5 100644 --- a/arch/arm/kernel/module-plts.c +++ b/arch/arm/kernel/module-plts.c @@ -285,11 +285,9 @@ bool in_module_plt(unsigned long loc) struct module *mod; bool ret; - preempt_disable(); + guard(rcu)(); mod = __module_text_address(loc); ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE || loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE); - preempt_enable(); - return ret; } From 17a9992dd7e8388d13eab85d4099c0ddfc53b043 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:46 +0100 Subject: [PATCH 17/32] arm64: module: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. Cc: Catalin Marinas Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Steven Rostedt Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: linux-trace-kernel@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-18-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- arch/arm64/kernel/ftrace.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index d7c0d023dfe5e..5a890714ee2e9 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -320,14 +320,13 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, * dealing with an out-of-range condition, we can assume it * is due to a module being loaded far away from the kernel. * - * NOTE: __module_text_address() must be called with preemption - * disabled, but we can rely on ftrace_lock to ensure that 'mod' + * NOTE: __module_text_address() must be called within a RCU read + * section, but we can rely on ftrace_lock to ensure that 'mod' * retains its validity throughout the remainder of this code. */ if (!mod) { - preempt_disable(); + guard(rcu)(); mod = __module_text_address(pc); - preempt_enable(); } if (WARN_ON(!mod)) From f99d27d9feb755aee9350fc89f57814d7e1b4880 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:47 +0100 Subject: [PATCH 18/32] LoongArch/orc: Use RCU in all users of __module_address(). __module_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_address() with RCU. 
Cc: Huacai Chen Cc: WANG Xuerui Cc: loongarch@lists.linux.dev Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-19-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- arch/loongarch/kernel/unwind_orc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c index b257228763317..d623935a75471 100644 --- a/arch/loongarch/kernel/unwind_orc.c +++ b/arch/loongarch/kernel/unwind_orc.c @@ -399,7 +399,7 @@ bool unwind_next_frame(struct unwind_state *state) return false; /* Don't let modules unload while we're reading their ORC data. */ - preempt_disable(); + guard(rcu)(); if (is_entry_func(state->pc)) goto end; @@ -514,14 +514,12 @@ bool unwind_next_frame(struct unwind_state *state) if (!__kernel_text_address(state->pc)) goto err; - preempt_enable(); return true; err: state->error = true; end: - preempt_enable(); state->stack_info.type = STACK_TYPE_UNKNOWN; return false; } From 18d83c3654c3d789e9e459e3d070dd20cc9e1ee2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:48 +0100 Subject: [PATCH 19/32] LoongArch: ftrace: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. Cc: Huacai Chen Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Steven Rostedt Cc: WANG Xuerui Cc: linux-trace-kernel@vger.kernel.org Cc: loongarch@lists.linux.dev Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-20-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- arch/loongarch/kernel/ftrace_dyn.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c index 25c9a4cfd5fa9..d5d81d74034c8 100644 --- a/arch/loongarch/kernel/ftrace_dyn.c +++ b/arch/loongarch/kernel/ftrace_dyn.c @@ -85,14 +85,13 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod * dealing with an out-of-range condition, we can assume it * is due to a module being loaded far away from the kernel. * - * NOTE: __module_text_address() must be called with preemption - * disabled, but we can rely on ftrace_lock to ensure that 'mod' + * NOTE: __module_text_address() must be called within a RCU read + * section, but we can rely on ftrace_lock to ensure that 'mod' * retains its validity throughout the remainder of this code. */ if (!mod) { - preempt_disable(); - mod = __module_text_address(pc); - preempt_enable(); + scoped_guard(rcu) + mod = __module_text_address(pc); } if (WARN_ON(!mod)) From ccf74e79ea3537bd697ca587192aae424d160235 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:49 +0100 Subject: [PATCH 20/32] powerpc/ftrace: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. 
Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Cc: Steven Rostedt Cc: linux-trace-kernel@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Tested-by: Shrikanth Hegde Link: https://lore.kernel.org/r/20250108090457.512198-21-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- arch/powerpc/kernel/trace/ftrace.c | 6 ++---- arch/powerpc/kernel/trace/ftrace_64_pg.c | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 2f776f137a89e..6dca92d5a6e82 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -115,10 +115,8 @@ static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long a { struct module *mod = NULL; - preempt_disable(); - mod = __module_text_address(ip); - preempt_enable(); - + scoped_guard(rcu) + mod = __module_text_address(ip); if (!mod) pr_err("No module loaded at addr=%lx\n", ip); diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c index ac35015f04c6a..5c6e545d1708c 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_pg.c +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c @@ -120,10 +120,8 @@ static struct module *ftrace_lookup_module(struct dyn_ftrace *rec) { struct module *mod; - preempt_disable(); - mod = __module_text_address(rec->ip); - preempt_enable(); - + scoped_guard(rcu) + mod = __module_text_address(rec->ip); if (!mod) pr_err("No module loaded at addr=%lx\n", rec->ip); From e151955bacf881e1f918613558e04f2f976169e4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:50 +0100 Subject: [PATCH 21/32] cfi: Use RCU while invoking __module_address(). __module_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. The _notrace() variant was introduced in commit 14c4c8e41511a ("cfi: Use rcu_read_{un}lock_sched_notrace"). The recursive case where __cfi_slowpath_diag() could end up calling itself is no longer present, as all that logic is gone since commit 89245600941e ("cfi: Switch to -fsanitize=kcfi"). Sami Tolvanen said that KCFI checks don't perform function calls. Elliot Berman verified it with | modprobe -a dummy_stm stm_ftrace stm_p_basic | mkdir -p /sys/kernel/config/stp-policy/dummy_stm.0.my-policy/default | echo function > /sys/kernel/tracing/current_tracer | echo 1 > /sys/kernel/tracing/tracing_on | echo dummy_stm.0 > /sys/class/stm_source/ftrace/stm_source_link Replace the rcu_read_lock_sched_notrace() section around __module_address() with RCU. 
Cc: Elliot Berman Cc: Kees Cook Cc: Nathan Chancellor Cc: Sami Tolvanen Cc: Steven Rostedt Cc: llvm@lists.linux.dev Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Tested-by: Elliot Berman # sm8650-qrd [1] Link: https://lore.kernel.org/all/20241230185812429-0800.eberman@hu-eberman-lv.qualcomm.com [1] Link: https://lore.kernel.org/r/20250108090457.512198-22-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/cfi.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/cfi.c b/kernel/cfi.c index 08caad7767176..abcd4d1f98eab 100644 --- a/kernel/cfi.c +++ b/kernel/cfi.c @@ -71,14 +71,11 @@ static bool is_module_cfi_trap(unsigned long addr) struct module *mod; bool found = false; - rcu_read_lock_sched_notrace(); - + guard(rcu)(); mod = __module_address(addr); if (mod) found = is_trap(addr, mod->kcfi_traps, mod->kcfi_traps_end); - rcu_read_unlock_sched_notrace(); - return found; } #else /* CONFIG_MODULES */ From 14daa3bca2176024704ceac1b476712309352261 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:51 +0100 Subject: [PATCH 22/32] x86: Use RCU in all users of __module_address(). __module_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_address() with RCU. Cc: H. Peter Anvin Cc: Borislav Petkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: x86@kernel.org Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-23-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- arch/x86/kernel/callthunks.c | 3 +-- arch/x86/kernel/unwind_orc.c | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c index 8418a892d195a..251b65f2ab216 100644 --- a/arch/x86/kernel/callthunks.c +++ b/arch/x86/kernel/callthunks.c @@ -98,11 +98,10 @@ static inline bool within_module_coretext(void *addr) #ifdef CONFIG_MODULES struct module *mod; - preempt_disable(); + guard(rcu)(); mod = __module_address((unsigned long)addr); if (mod && within_module_core((unsigned long)addr, mod)) ret = true; - preempt_enable(); #endif return ret; } diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index d4705a348a804..977ee75e047c8 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -476,7 +476,7 @@ bool unwind_next_frame(struct unwind_state *state) return false; /* Don't let modules unload while we're reading their ORC data. */ - preempt_disable(); + guard(rcu)(); /* End-of-stack check for user tasks: */ if (state->regs && user_mode(state->regs)) @@ -669,14 +669,12 @@ bool unwind_next_frame(struct unwind_state *state) goto err; } - preempt_enable(); return true; err: state->error = true; the_end: - preempt_enable(); state->stack_info.type = STACK_TYPE_UNKNOWN; return false; } From 4038131fdf3dfecd5299040bc398cd8908e701ce Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:52 +0100 Subject: [PATCH 23/32] jump_label: Use RCU in all users of __module_address(). __module_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_address() with RCU. 
Cc: Ard Biesheuvel Cc: Jason Baron Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-24-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/jump_label.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 93a822d3c468c..7fcf4017cb383 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -746,9 +746,9 @@ static int jump_label_add_module(struct module *mod) kfree(jlm); return -ENOMEM; } - preempt_disable(); - jlm2->mod = __module_address((unsigned long)key); - preempt_enable(); + scoped_guard(rcu) + jlm2->mod = __module_address((unsigned long)key); + jlm2->entries = static_key_entries(key); jlm2->next = NULL; static_key_set_mod(key, jlm2); @@ -906,13 +906,13 @@ static void jump_label_update(struct static_key *key) return; } - preempt_disable(); - mod = __module_address((unsigned long)key); - if (mod) { - stop = mod->jump_entries + mod->num_jump_entries; - init = mod->state == MODULE_STATE_COMING; + scoped_guard(rcu) { + mod = __module_address((unsigned long)key); + if (mod) { + stop = mod->jump_entries + mod->num_jump_entries; + init = mod->state == MODULE_STATE_COMING; + } } - preempt_enable(); #endif entry = static_key_entries(key); /* if there are no users, entry can be NULL */ From 72ee1c20b0b5ee0e303cc4fbe2bae6523e382451 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:53 +0100 Subject: [PATCH 24/32] jump_label: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. Cc: Ard Biesheuvel Cc: Jason Baron Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-25-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/jump_label.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 7fcf4017cb383..7cb19e6014266 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -653,13 +653,12 @@ static int __jump_label_mod_text_reserved(void *start, void *end) struct module *mod; int ret; - preempt_disable(); - mod = __module_text_address((unsigned long)start); - WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); - if (!try_module_get(mod)) - mod = NULL; - preempt_enable(); - + scoped_guard(rcu) { + mod = __module_text_address((unsigned long)start); + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); + if (!try_module_get(mod)) + mod = NULL; + } if (!mod) return 0; From 8c6eb7ca8600a2704b1fb7f1abe1f7c49a0d970a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 29 Jan 2025 09:47:51 +0100 Subject: [PATCH 25/32] bpf: Use RCU in all users of __module_text_address(). __module_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_address() with RCU. 
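Where only part of a function needs the protection, the series uses the
scoped form instead; the loop below boils down to (sketch, error
handling condensed):

	scoped_guard(rcu) {
		mod = __module_address(addrs[i]);
		/* Either no module or it's already stored */
		if (!mod || has_module(&arr, mod))
			skip_add = true;
		else if (!try_module_get(mod))
			err = -EINVAL;
	}

scoped_guard(rcu) holds the RCU read lock only for the enclosed block,
which maps more directly onto the old preempt_disable()/preempt_enable()
pairs than a function-wide guard(rcu)().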
Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Daniel Borkmann Cc: Eduard Zingerman Cc: Hao Luo Cc: Jiri Olsa Cc: John Fastabend Cc: KP Singh Cc: Martin KaFai Lau Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Matt Bobrowski Cc: Song Liu Cc: Stanislav Fomichev Cc: Steven Rostedt Cc: Yonghong Song Cc: bpf@vger.kernel.org Cc: linux-trace-kernel@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Acked-by: Alexei Starovoitov Link: https://lore.kernel.org/r/20250129084751.tH6iidUO@linutronix.de Signed-off-by: Petr Pavlu --- kernel/trace/bpf_trace.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index adc947587eb81..e6a17a60d8787 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2345,10 +2345,9 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) { struct module *mod; - preempt_disable(); + guard(rcu)(); mod = __module_address((unsigned long)btp); module_put(mod); - preempt_enable(); } static __always_inline @@ -2932,18 +2931,21 @@ static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u3 u32 i, err = 0; for (i = 0; i < addrs_cnt; i++) { + bool skip_add = false; struct module *mod; - preempt_disable(); - mod = __module_address(addrs[i]); - /* Either no module or we it's already stored */ - if (!mod || has_module(&arr, mod)) { - preempt_enable(); - continue; + scoped_guard(rcu) { + mod = __module_address(addrs[i]); + /* Either no module or it's already stored */ + if (!mod || has_module(&arr, mod)) { + skip_add = true; + break; /* scoped_guard */ + } + if (!try_module_get(mod)) + err = -EINVAL; } - if (!try_module_get(mod)) - err = -EINVAL; - preempt_enable(); + if (skip_add) + continue; if (err) break; err = add_module(&arr, mod); From 7e74a7c00aefdcd0941d98873fce402d93ef07dc Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 29 Jan 2025 09:49:25 +0100 Subject: [PATCH 26/32] kprobes: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. Cc: David S. Miller Cc: Anil S Keshavamurthy Cc: Masami Hiramatsu Cc: Naveen N Rao Cc: linux-trace-kernel@vger.kernel.org Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250129084925.9ppBjGLC@linutronix.de Signed-off-by: Petr Pavlu --- kernel/kprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 88aeac84e4c05..ffe0c3d523063 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1547,7 +1547,7 @@ static int check_kprobe_address_safe(struct kprobe *p, /* Ensure the address is in a text area, and find a module if exists. */ *probed_mod = NULL; if (!core_kernel_text((unsigned long) p->addr)) { - guard(preempt)(); + guard(rcu)(); *probed_mod = __module_text_address((unsigned long) p->addr); if (!(*probed_mod)) return -EINVAL; From 3983da398c26962e1d7ce691e83c968f7587d285 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:56 +0100 Subject: [PATCH 27/32] static_call: Use RCU in all users of __module_text_address(). __module_text_address() can be invoked within a RCU section, there is no requirement to have preemption disabled. Replace the preempt_disable() section around __module_text_address() with RCU. 
Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-28-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- kernel/static_call_inline.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c index bb7d066a7c397..c2c59e6ef35d0 100644 --- a/kernel/static_call_inline.c +++ b/kernel/static_call_inline.c @@ -325,13 +325,12 @@ static int __static_call_mod_text_reserved(void *start, void *end) struct module *mod; int ret; - preempt_disable(); - mod = __module_text_address((unsigned long)start); - WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); - if (!try_module_get(mod)) - mod = NULL; - preempt_enable(); - + scoped_guard(rcu) { + mod = __module_text_address((unsigned long)start); + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); + if (!try_module_get(mod)) + mod = NULL; + } if (!mod) return 0; From f47d2a3f7542a5507b4072fb768a2258071ff519 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 8 Jan 2025 10:04:57 +0100 Subject: [PATCH 28/32] bug: Use RCU instead RCU-sched to protect module_bug_list. The list module_bug_list relies on module_mutex for writer synchronisation. The list is already RCU style. The list removal is synchronized with modules' synchronize_rcu() in free_module(). Use RCU read lock protection instead of RCU-sched. Cc: Andrew Morton Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250108090457.512198-29-bigeasy@linutronix.de Signed-off-by: Petr Pavlu --- lib/bug.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/lib/bug.c b/lib/bug.c index e0ff219899902..b1f07459c2ee3 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -66,23 +66,19 @@ static LIST_HEAD(module_bug_list); static struct bug_entry *module_find_bug(unsigned long bugaddr) { + struct bug_entry *bug; struct module *mod; - struct bug_entry *bug = NULL; - rcu_read_lock_sched(); + guard(rcu)(); list_for_each_entry_rcu(mod, &module_bug_list, bug_list) { unsigned i; bug = mod->bug_table; for (i = 0; i < mod->num_bugs; ++i, ++bug) if (bugaddr == bug_addr(bug)) - goto out; + return bug; } - bug = NULL; -out: - rcu_read_unlock_sched(); - - return bug; + return NULL; } void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, @@ -235,11 +231,11 @@ void generic_bug_clear_once(void) #ifdef CONFIG_MODULES struct module *mod; - rcu_read_lock_sched(); - list_for_each_entry_rcu(mod, &module_bug_list, bug_list) - clear_once_table(mod->bug_table, - mod->bug_table + mod->num_bugs); - rcu_read_unlock_sched(); + scoped_guard(rcu) { + list_for_each_entry_rcu(mod, &module_bug_list, bug_list) + clear_once_table(mod->bug_table, + mod->bug_table + mod->num_bugs); + } #endif clear_once_table(__start___bug_table, __stop___bug_table); From 3690f4a86005b4b15fabc606d2bbf39bb6290aff Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Thu, 13 Feb 2025 23:13:52 +0100 Subject: [PATCH 29/32] params: Annotate struct module_param_attrs with __counted_by() Add the __counted_by compiler attribute to the flexible array member attrs to improve access bounds-checking via CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE. Increment num before adding a new param_attribute to the attrs array and adjust the array index accordingly. 
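Concretely, the annotated structure and the first reallocation look
roughly like this (sketch, trimmed to the relevant members):

	struct module_param_attrs {
		unsigned int num;
		struct attribute_group grp;
		struct param_attribute attrs[] __counted_by(num);
	};

	/* grow attrs[] by one; struct_size() replaces the manual sizeof math */
	new_mp = krealloc(mk->mp, struct_size(mk->mp, attrs, mk->mp->num + 1),
			  GFP_KERNEL);

Because attrs[] is now __counted_by(num), num has to cover the new slot
before attrs[num - 1] is written, which is why the increment moves up
and the array indices shift by one.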
From 3690f4a86005b4b15fabc606d2bbf39bb6290aff Mon Sep 17 00:00:00 2001
From: Thorsten Blum
Date: Thu, 13 Feb 2025 23:13:52 +0100
Subject: [PATCH 29/32] params: Annotate struct module_param_attrs with
 __counted_by()

Add the __counted_by compiler attribute to the flexible array member
attrs to improve access bounds-checking via CONFIG_UBSAN_BOUNDS and
CONFIG_FORTIFY_SOURCE.

Increment num before adding a new param_attribute to the attrs array
and adjust the array index accordingly. Increment num immediately after
the first reallocation such that the reallocation for the NULL
terminator only needs to add 1 (instead of 2) to mk->mp->num.

Use struct_size() instead of manually calculating the size for the
reallocation.

Use krealloc_array() for the additional NULL terminator.

Cc: Andy Shevchenko
Cc: Luis Chamberlain
Cc: Nathan Chancellor
Signed-off-by: Thorsten Blum
Reviewed-by: Luis Chamberlain
Link: https://lore.kernel.org/r/20250213221352.2625-3-thorsten.blum@linux.dev
Signed-off-by: Petr Pavlu
---
 kernel/params.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/kernel/params.c b/kernel/params.c
index 0074d29c9b80c..2509f216c9f3c 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -551,7 +551,7 @@ struct module_param_attrs
 {
 	unsigned int num;
 	struct attribute_group grp;
-	struct param_attribute attrs[];
+	struct param_attribute attrs[] __counted_by(num);
 };
 
 #ifdef CONFIG_SYSFS
@@ -651,35 +651,32 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
 	}
 
 	/* Enlarge allocations. */
-	new_mp = krealloc(mk->mp,
-			  sizeof(*mk->mp) +
-			  sizeof(mk->mp->attrs[0]) * (mk->mp->num + 1),
+	new_mp = krealloc(mk->mp, struct_size(mk->mp, attrs, mk->mp->num + 1),
 			  GFP_KERNEL);
 	if (!new_mp)
 		return -ENOMEM;
 	mk->mp = new_mp;
+	mk->mp->num++;
 
 	/* Extra pointer for NULL terminator */
-	new_attrs = krealloc(mk->mp->grp.attrs,
-			     sizeof(mk->mp->grp.attrs[0]) * (mk->mp->num + 2),
-			     GFP_KERNEL);
+	new_attrs = krealloc_array(mk->mp->grp.attrs, mk->mp->num + 1,
+				   sizeof(mk->mp->grp.attrs[0]), GFP_KERNEL);
 	if (!new_attrs)
 		return -ENOMEM;
 	mk->mp->grp.attrs = new_attrs;
 
 	/* Tack new one on the end. */
-	memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
-	sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
-	mk->mp->attrs[mk->mp->num].param = kp;
-	mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
+	memset(&mk->mp->attrs[mk->mp->num - 1], 0, sizeof(mk->mp->attrs[0]));
+	sysfs_attr_init(&mk->mp->attrs[mk->mp->num - 1].mattr.attr);
+	mk->mp->attrs[mk->mp->num - 1].param = kp;
+	mk->mp->attrs[mk->mp->num - 1].mattr.show = param_attr_show;
 	/* Do not allow runtime DAC changes to make param writable. */
 	if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
-		mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+		mk->mp->attrs[mk->mp->num - 1].mattr.store = param_attr_store;
 	else
-		mk->mp->attrs[mk->mp->num].mattr.store = NULL;
-	mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
-	mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
-	mk->mp->num++;
+		mk->mp->attrs[mk->mp->num - 1].mattr.store = NULL;
+	mk->mp->attrs[mk->mp->num - 1].mattr.attr.name = (char *)name;
+	mk->mp->attrs[mk->mp->num - 1].mattr.attr.mode = kp->perm;
 
 	/* Fix up all the pointers, since krealloc can move us */
 	for (i = 0; i < mk->mp->num; i++)
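
To make the above concrete, here is a compact sketch (struct val_table and val_table_add() are made-up names) of the __counted_by()/struct_size() pattern: the recorded count is what the fortified bounds check compares against, so it has to be raised before the new element is written at index num - 1.

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Hypothetical structure mirroring struct module_param_attrs. */
	struct val_table {
		unsigned int num;
		unsigned long vals[] __counted_by(num);
	};

	/* Grow a previously allocated table by one element (hypothetical helper). */
	static struct val_table *val_table_add(struct val_table *tbl, unsigned long v)
	{
		struct val_table *new;

		/* Size of the struct plus num + 1 elements, with overflow checking. */
		new = krealloc(tbl, struct_size(tbl, vals, tbl->num + 1), GFP_KERNEL);
		if (!new)
			return NULL;

		new->num++;			/* raise the recorded bound first ...    */
		new->vals[new->num - 1] = v;	/* ... so this store stays within bounds */
		return new;
	}
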
From 6380bf8ff90213282a7a7f99697964a9c2bd6899 Mon Sep 17 00:00:00 2001
From: Thorsten Blum
Date: Fri, 7 Mar 2025 12:35:47 +0100
Subject: [PATCH 30/32] module: Replace deprecated strncpy() with strscpy()

strncpy() is deprecated for NUL-terminated destination buffers; use
strscpy() instead. The destination buffer ownername is only used with
"%s" format strings and must therefore be NUL-terminated, but not
NUL-padded.

No functional changes intended.

Link: https://github.com/KSPP/linux/issues/90
Cc: linux-hardening@vger.kernel.org
Signed-off-by: Thorsten Blum
Link: https://lore.kernel.org/r/20250307113546.112237-2-thorsten.blum@linux.dev
Signed-off-by: Petr Pavlu
---
 kernel/module/main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/module/main.c b/kernel/module/main.c
index 81ecb9d3a9353..e1ed5d82f45fe 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -1179,7 +1179,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 
 getname:
 	/* We must make copy under the lock if we failed to get ref. */
-	strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
+	strscpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
 unlock:
 	mutex_unlock(&module_mutex);
 	return fsa.sym;

From 085c5e37427d22381efa6e8a660dd810e8ef36ef Mon Sep 17 00:00:00 2001
From: Thorsten Blum
Date: Sat, 8 Mar 2025 20:46:32 +0100
Subject: [PATCH 31/32] module: Remove unnecessary size argument when calling
 strscpy()

The size parameter is optional and strscpy() automatically determines
the length of the destination buffer using sizeof() if the argument is
omitted. This makes the explicit sizeof() unnecessary. Remove it to
shorten and simplify the code.

Signed-off-by: Thorsten Blum
Link: https://lore.kernel.org/r/20250308194631.191670-2-thorsten.blum@linux.dev
Signed-off-by: Petr Pavlu
---
 kernel/module/main.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/module/main.c b/kernel/module/main.c
index e1ed5d82f45fe..9195849e13c4d 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -787,8 +787,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 	async_synchronize_full();
 
 	/* Store the name and taints of the last unloaded module for diagnostic purposes */
-	strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
-	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
+	strscpy(last_unloaded_module.name, mod->name);
+	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false));
 
 	free_module(mod);
 	/* someone could wait for the module in add_unformed_module() */
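
For reference, a tiny sketch of the two strscpy() forms used above (copy_names() and its arguments are hypothetical, kernel context assumed). Unlike strncpy(), strscpy() always NUL-terminates, returns the copied length or -E2BIG on truncation, and does not NUL-pad the remainder of the buffer.

	#include <linux/string.h>

	/* Hypothetical helper, not part of the series. */
	static void copy_names(const char *src, char dst[], size_t dst_len)
	{
		char buf[16];

		/* Two-argument form: the size is taken from the array type of buf. */
		strscpy(buf, src);

		/* Explicit size: needed here because dst has decayed to a plain pointer. */
		strscpy(dst, buf, dst_len);
	}

This is presumably also why resolve_symbol() above keeps the explicit MODULE_NAME_LEN argument: its ownername destination arrives as a function parameter, so sizeof() cannot see an array bound there, while last_unloaded_module.name in delete_module() is a real array.
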
From 897c0b4e27135132dc5b348c1a3773d059668489 Mon Sep 17 00:00:00 2001
From: Petr Pavlu
Date: Thu, 6 Mar 2025 17:20:59 +0100
Subject: [PATCH 32/32] MAINTAINERS: Update the MODULE SUPPORT section

Change my role for MODULE SUPPORT from a reviewer to a maintainer. We
started to rotate its maintainership and I currently look after the
modules tree. This not being reflected in MAINTAINERS proved to confuse
folks.

Add lib/tests/module/ and tools/testing/selftests/module/ to maintained
files. They were introduced previously by commit 84b4a51fce4c
("selftests: add new kallsyms selftests").

Acked-by: Steven Rostedt (Google)
Reviewed-by: Luis Chamberlain
Link: https://lore.kernel.org/r/20250306162117.18876-1-petr.pavlu@suse.com
Signed-off-by: Petr Pavlu
---
 MAINTAINERS | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index ed7aa6867674e..cacaf564a1885 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15982,7 +15982,7 @@ F:	include/dt-bindings/clock/mobileye,eyeq5-clk.h
 
 MODULE SUPPORT
 M:	Luis Chamberlain
-R:	Petr Pavlu
+M:	Petr Pavlu
 R:	Sami Tolvanen
 R:	Daniel Gomez
 L:	linux-modules@vger.kernel.org
@@ -15993,8 +15993,10 @@ F:	include/linux/kmod.h
 F:	include/linux/module*.h
 F:	kernel/module/
 F:	lib/test_kmod.c
+F:	lib/tests/module/
 F:	scripts/module*
 F:	tools/testing/selftests/kmod/
+F:	tools/testing/selftests/module/
 
 MONOLITHIC POWER SYSTEM PMIC DRIVER
 M:	Saravanan Sekar