Skip to content

Commit

Permalink
tracing: Have persistent trace instances save KASLR offset
Browse files Browse the repository at this point in the history
There's no reason for the ring buffer itself to save the KASLR offset;
the offset is used by the tracer, not by the ring buffer. Now that the
tracer has a way to save data in the persistent memory of the ring
buffer, have the tracing infrastructure take care of saving the KASLR
offset.

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250305164608.792722274@goodmis.org
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
  • Loading branch information
Steven Rostedt committed Mar 28, 2025
1 parent 4af0a9c commit b653348
Show file tree
Hide file tree
Showing 4 changed files with 46 additions and 57 deletions.
1 change: 0 additions & 1 deletion include/linux/ring_buffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,6 @@ struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flag
unsigned long scratch_size,
struct lock_class_key *key);

bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kaslr_addr);
void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size);

/*
Expand Down
59 changes: 10 additions & 49 deletions kernel/trace/ring_buffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,6 @@ struct ring_buffer_meta {
};

struct ring_buffer_cpu_meta {
unsigned long kaslr_addr;
unsigned long first_buffer;
unsigned long head_buffer;
unsigned long commit_buffer;
Expand Down Expand Up @@ -557,8 +556,6 @@ struct trace_buffer {

struct ring_buffer_meta *meta;

unsigned long kaslr_addr;

unsigned int subbuf_size;
unsigned int subbuf_order;
unsigned int max_data_size;
Expand Down Expand Up @@ -1949,15 +1946,6 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
}
}

/*
 * Record the current boot's KASLR offset into the per-CPU persistent
 * meta data, so a later boot can compute the text address delta when
 * reading back events from this buffer.
 */
static void rb_meta_init_text_addr(struct ring_buffer_cpu_meta *meta)
{
#ifdef CONFIG_RANDOMIZE_BASE
	/* kaslr_offset() only exists when base randomization is enabled */
	meta->kaslr_addr = kaslr_offset();
#else
	meta->kaslr_addr = 0;
#endif
}

static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
{
struct ring_buffer_cpu_meta *meta;
Expand Down Expand Up @@ -1990,7 +1978,6 @@ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int sc
meta->first_buffer += delta;
meta->head_buffer += delta;
meta->commit_buffer += delta;
buffer->kaslr_addr = meta->kaslr_addr;
continue;
}

Expand All @@ -2007,7 +1994,6 @@ static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int sc
subbuf = rb_subbufs_from_meta(meta);

meta->first_buffer = (unsigned long)subbuf;
rb_meta_init_text_addr(meta);

/*
* The buffers[] array holds the order of the sub-buffers
Expand Down Expand Up @@ -2549,35 +2535,22 @@ struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flag
scratch_size, key);
}

/**
 * ring_buffer_last_boot_delta - return the KASLR offset saved from last boot
 * @buffer: The buffer to return the KASLR offset from
 * @kaslr_addr: Filled with the saved KASLR offset when it is non zero
 *
 * Returns: true if a non-zero KASLR offset was saved in @buffer,
 * false if @buffer is NULL or no offset was saved.
 */
bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, unsigned long *kaslr_addr)
{
	if (!buffer)
		return false;

	if (!buffer->kaslr_addr)
		return false;

	*kaslr_addr = buffer->kaslr_addr;

	return true;
}

/**
 * ring_buffer_meta_scratch - return the start of the scratch area
 * @buffer: The persistent buffer holding the meta data
 * @size: Optional; filled with the size in bytes of the scratch area
 *
 * The scratch area lives in the persistent memory between the buffer
 * meta data header and the sub-buffer descriptors, and is available
 * for the tracer to store data that survives a reboot.
 *
 * Returns: a pointer to the scratch area, or NULL if @buffer does not
 * have persistent meta data.
 */
void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
{
	struct ring_buffer_meta *meta;
	void *ptr;

	if (!buffer || !buffer->meta)
		return NULL;

	meta = buffer->meta;

	/* Scratch data starts word-aligned just after the meta header */
	ptr = (void *)ALIGN((unsigned long)meta + sizeof(*meta), sizeof(long));

	if (size)
		*size = (void *)meta + meta->buffers_offset - ptr;

	return ptr;
}

/**
Expand Down Expand Up @@ -6133,7 +6106,6 @@ static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_cpu_meta *meta;

if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
Expand All @@ -6152,11 +6124,6 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->resize_disabled);

/* Make sure persistent meta now uses this buffer's addresses */
meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
if (meta)
rb_meta_init_text_addr(meta);

mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
Expand All @@ -6171,7 +6138,6 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_cpu_meta *meta;
int cpu;

/* prevent another thread from changing buffer sizes */
Expand Down Expand Up @@ -6199,11 +6165,6 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)

reset_disabled_cpu_buffer(cpu_buffer);

/* Make sure persistent meta now uses this buffer's addresses */
meta = rb_range_meta(buffer, 0, cpu_buffer->cpu);
if (meta)
rb_meta_init_text_addr(meta);

atomic_dec(&cpu_buffer->record_disabled);
atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
}
Expand Down
37 changes: 32 additions & 5 deletions kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -5988,8 +5988,14 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
return __tracing_resize_ring_buffer(tr, size, cpu_id);
}

/*
 * Layout of the tracer's scratch area in the persistent ring buffer.
 * Currently holds only the KASLR offset of the boot that wrote the
 * buffer, used to compute the text address delta on a later boot.
 */
struct trace_scratch {
	unsigned long kaslr_addr;	/* kaslr_offset() of the writing boot */
};

static void update_last_data(struct trace_array *tr)
{
struct trace_scratch *tscratch;

if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
return;

Expand All @@ -6004,6 +6010,17 @@ static void update_last_data(struct trace_array *tr)
/* Using current data now */
tr->text_delta = 0;

if (!tr->scratch)
return;

tscratch = tr->scratch;

/* Set the persistent ring buffer meta data to this address */
#ifdef CONFIG_RANDOMIZE_BASE
tscratch->kaslr_addr = kaslr_offset();
#else
tscratch->kaslr_addr = 0;
#endif
tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT;
}

Expand Down Expand Up @@ -6817,6 +6834,7 @@ static ssize_t
tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
struct trace_scratch *tscratch = tr->scratch;
struct seq_buf seq;
char buf[64];

Expand All @@ -6829,8 +6847,8 @@ tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t
* Otherwise it shows the KASLR address from the previous boot which
* should not be the same as the current boot.
*/
if (tr->flags & TRACE_ARRAY_FL_LAST_BOOT)
seq_buf_printf(&seq, "%lx\t[kernel]\n", tr->kaslr_addr);
if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
seq_buf_printf(&seq, "%lx\t[kernel]\n", tscratch->kaslr_addr);
else
seq_buf_puts(&seq, "# Current\n");

Expand Down Expand Up @@ -9210,6 +9228,8 @@ static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
enum ring_buffer_flags rb_flags;
struct trace_scratch *tscratch;
unsigned int scratch_size;

rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

Expand All @@ -9218,12 +9238,19 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
if (tr->range_addr_start && tr->range_addr_size) {
buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
tr->range_addr_start,
tr->range_addr_size, 0);
tr->range_addr_size,
sizeof(*tscratch));

tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
if (tscratch) {
tr->scratch = tscratch;
tr->scratch_size = scratch_size;

#ifdef CONFIG_RANDOMIZE_BASE
if (ring_buffer_last_boot_delta(buf->buffer, &tr->kaslr_addr))
tr->text_delta = kaslr_offset() - tr->kaslr_addr;
if (tscratch->kaslr_addr)
tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
#endif
}
/*
* This is basically the same as a mapped buffer,
* with the same restrictions.
Expand Down
6 changes: 4 additions & 2 deletions kernel/trace/trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -348,8 +348,11 @@ struct trace_array {
unsigned int mapped;
unsigned long range_addr_start;
unsigned long range_addr_size;
unsigned long kaslr_addr;
long text_delta;
void *scratch; /* pointer in persistent memory */
int scratch_size;

int buffer_disabled;

struct trace_pid_list __rcu *filtered_pids;
struct trace_pid_list __rcu *filtered_no_pids;
Expand All @@ -367,7 +370,6 @@ struct trace_array {
* CONFIG_TRACER_MAX_TRACE.
*/
arch_spinlock_t max_lock;
int buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter;
int sys_refcount_exit;
Expand Down

0 comments on commit b653348

Please sign in to comment.