Skip to content

Commit

Permalink
tracing: Have persistent trace instances save module addresses
Browse files Browse the repository at this point in the history
For trace instances that are mapped to persistent memory, have them use
the scratch area to save the currently loaded modules. This will allow
the next boot to determine where the modules had been loaded so that their
addresses can be deciphered by using where they were loaded previously.

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250305164609.129741650@goodmis.org
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
  • Loading branch information
Steven Rostedt committed Mar 28, 2025
1 parent 966b7d0 commit fd39e48
Showing 1 changed file with 89 additions and 9 deletions.
98 changes: 89 additions & 9 deletions kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -5988,14 +5988,60 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
return __tracing_resize_ring_buffer(tr, size, cpu_id);
}

/* One record of a loaded module saved in the persistent scratch area */
struct trace_mod_entry {
unsigned long mod_addr; /* base address of the module's text (mod->mem[MOD_TEXT].base) */
char mod_name[MODULE_NAME_LEN]; /* NUL-terminated module name */
};

/*
 * Layout of the persistent ring buffer scratch area. Written on one boot
 * and read back on the next so that addresses recorded in the persistent
 * trace can be mapped to the previous boot's layout.
 */
struct trace_scratch {
unsigned long kaslr_addr; /* kaslr_offset() of the boot that wrote this; used to compute text_delta */
unsigned long nr_entries; /* number of valid elements in entries[] */
struct trace_mod_entry entries[]; /* flexible array of saved module records */
};

static int save_mod(struct module *mod, void *data)
{
struct trace_array *tr = data;
struct trace_scratch *tscratch;
struct trace_mod_entry *entry;
unsigned int size;

tscratch = tr->scratch;
if (!tscratch)
return -1;
size = tr->scratch_size;

if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size)
return -1;

entry = &tscratch->entries[tscratch->nr_entries];

tscratch->nr_entries++;

entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base;
strscpy(entry->mod_name, mod->name);

return 0;
}

static void update_last_data(struct trace_array *tr)
{
struct trace_scratch *tscratch;

if (!(tr->flags & TRACE_ARRAY_FL_BOOT))
return;

/* Reset the module list and reload them */
if (tr->scratch) {
struct trace_scratch *tscratch = tr->scratch;

memset(tscratch->entries, 0,
flex_array_size(tscratch, entries, tscratch->nr_entries));
tscratch->nr_entries = 0;

module_for_each_mod(save_mod, tr);
}

if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
return;

Expand Down Expand Up @@ -9224,6 +9270,46 @@ static struct dentry *trace_instance_dir;
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static void setup_trace_scratch(struct trace_array *tr,
struct trace_scratch *tscratch, unsigned int size)
{
struct trace_mod_entry *entry;

if (!tscratch)
return;

tr->scratch = tscratch;
tr->scratch_size = size;

#ifdef CONFIG_RANDOMIZE_BASE
if (tscratch->kaslr_addr)
tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
#endif

if (struct_size(tscratch, entries, tscratch->nr_entries) > size)
goto reset;

/* Check if each module name is a valid string */
for (int i = 0; i < tscratch->nr_entries; i++) {
int n;

entry = &tscratch->entries[i];

for (n = 0; n < MODULE_NAME_LEN; n++) {
if (entry->mod_name[n] == '\0')
break;
if (!isprint(entry->mod_name[n]))
goto reset;
}
if (n == MODULE_NAME_LEN)
goto reset;
}
return;
reset:
/* Invalid trace modules */
memset(tscratch, 0, size);
}

static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
Expand All @@ -9236,21 +9322,15 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
buf->tr = tr;

if (tr->range_addr_start && tr->range_addr_size) {
/* Add scratch buffer to handle 128 modules */
buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
tr->range_addr_start,
tr->range_addr_size,
sizeof(*tscratch));
struct_size(tscratch, entries, 128));

tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
if (tscratch) {
tr->scratch = tscratch;
tr->scratch_size = scratch_size;
setup_trace_scratch(tr, tscratch, scratch_size);

#ifdef CONFIG_RANDOMIZE_BASE
if (tscratch->kaslr_addr)
tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
#endif
}
/*
* This is basically the same as a mapped buffer,
* with the same restrictions.
Expand Down

0 comments on commit fd39e48

Please sign in to comment.