ring-buffer: Allow mapped field to be set without mapping

In preparation for having the ring buffer mapped to a dedicated location,
which will have the same restrictions as user space memory mapped buffers,
allow the ring buffer code to use the "mapped" field of the
ring_buffer_per_cpu structure without having the user space meta page
mapping.

When that code starts using the mapped field, it will need to handle adding
a user space mapping to (and removing one from) a ring buffer that is using
a dedicated memory range.
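
As a rough illustration (not part of this commit; the struct and helper
names below are made up), the split this patch prepares for is:
"user_mapped" counts only user space mmap() users, while "mapped" counts
every reason the buffer must stay in mapped mode, so the invariant is
mapped >= user_mapped:

/*
 * Hypothetical sketch only: how a dedicated memory range could bump
 * "mapped" without creating a meta page or touching "user_mapped".
 */
struct rb_cpu_sketch {
	unsigned int mapped;		/* all reasons the buffer is mapped/pinned */
	unsigned int user_mapped;	/* user space mmap() users only */
};

/* Buffer is backed by a dedicated memory range: no meta page needed. */
void rb_sketch_set_range_mapped(struct rb_cpu_sketch *cb)
{
	cb->mapped++;			/* user_mapped stays 0 */
}

/* First user space mmap() of the buffer: the meta page is set up here. */
void rb_sketch_first_user_map(struct rb_cpu_sketch *cb)
{
	cb->mapped++;
	cb->user_mapped = 1;
}

With that split, rb_update_meta_page() and the mmap paths key off
user_mapped, while a dedicated range can keep mapped non-zero without any
user space mapping.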

Link: https://lkml.kernel.org/r/20240612232025.190908567@goodmis.org

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vincent Donnefort <vdonnefort@google.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vineeth Pillai <vineeth@bitbyteword.org>
Cc: Youssef Esmat <youssefesmat@google.com>
Cc: Beau Belgrave <beaub@linux.microsoft.com>
Cc: Alexander Graf <graf@amazon.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Ross Zwisler <zwisler@google.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

Steven Rostedt (Google) committed Jun 14, 2024 · 1 parent c3f38fa · commit dd4900d

Showing 1 changed file with 27 additions and 10 deletions.

kernel/trace/ring_buffer.c

@@ -491,6 +491,7 @@ struct ring_buffer_per_cpu {
 	unsigned long			pages_removed;
 
 	unsigned int			mapped;
+	unsigned int			user_mapped;	/* user space mapping */
 	struct mutex			mapping_lock;
 	unsigned long			*subbuf_ids;	/* ID to subbuf VA */
 	struct trace_buffer_meta	*meta_page;
@@ -5224,6 +5225,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct trace_buffer_meta *meta = cpu_buffer->meta_page;
 
+	if (!meta)
+		return;
+
 	meta->reader.read = cpu_buffer->reader_page->read;
 	meta->reader.id = cpu_buffer->reader_page->id;
 	meta->reader.lost_events = cpu_buffer->lost_events;
@@ -5280,7 +5284,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->lost_events = 0;
 	cpu_buffer->last_overrun = 0;
 
-	if (cpu_buffer->mapped)
+	if (cpu_buffer->user_mapped)
 		rb_update_meta_page(cpu_buffer);
 
 	rb_head_page_activate(cpu_buffer);
@@ -6167,7 +6171,7 @@ rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
 
 	mutex_lock(&cpu_buffer->mapping_lock);
 
-	if (!cpu_buffer->mapped) {
+	if (!cpu_buffer->user_mapped) {
 		mutex_unlock(&cpu_buffer->mapping_lock);
 		return ERR_PTR(-ENODEV);
 	}
@@ -6191,19 +6195,26 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
 
 	lockdep_assert_held(&cpu_buffer->mapping_lock);
 
+	/* mapped is always greater or equal to user_mapped */
+	if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
+		return -EINVAL;
+
 	if (inc && cpu_buffer->mapped == UINT_MAX)
 		return -EBUSY;
 
-	if (WARN_ON(!inc && cpu_buffer->mapped == 0))
+	if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
 		return -EINVAL;
 
 	mutex_lock(&cpu_buffer->buffer->mutex);
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	if (inc)
+	if (inc) {
+		cpu_buffer->user_mapped++;
 		cpu_buffer->mapped++;
-	else
+	} else {
+		cpu_buffer->user_mapped--;
 		cpu_buffer->mapped--;
+	}
 
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 	mutex_unlock(&cpu_buffer->buffer->mutex);
@@ -6328,7 +6339,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 
 	mutex_lock(&cpu_buffer->mapping_lock);
 
-	if (cpu_buffer->mapped) {
+	if (cpu_buffer->user_mapped) {
 		err = __rb_map_vma(cpu_buffer, vma);
 		if (!err)
 			err = __rb_inc_dec_mapped(cpu_buffer, true);
@@ -6359,12 +6370,15 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 	 */
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
+
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	err = __rb_map_vma(cpu_buffer, vma);
 	if (!err) {
 		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-		cpu_buffer->mapped = 1;
+		/* This is the first time it is mapped by user */
+		cpu_buffer->mapped++;
+		cpu_buffer->user_mapped = 1;
 		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 	} else {
 		kfree(cpu_buffer->subbuf_ids);
@@ -6392,18 +6406,21 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 
 	mutex_lock(&cpu_buffer->mapping_lock);
 
-	if (!cpu_buffer->mapped) {
+	if (!cpu_buffer->user_mapped) {
 		err = -ENODEV;
 		goto out;
-	} else if (cpu_buffer->mapped > 1) {
+	} else if (cpu_buffer->user_mapped > 1) {
 		__rb_inc_dec_mapped(cpu_buffer, false);
 		goto out;
 	}
 
 	mutex_lock(&buffer->mutex);
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	cpu_buffer->mapped = 0;
+	/* This is the last user space mapping */
+	if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
+		cpu_buffer->mapped--;
+	cpu_buffer->user_mapped = 0;
 
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
