ring-buffer: Use sync sched protection on ring buffer resizing
There was a comment in the ring buffer code stating that the calling
layers should prevent tracing or reading of the ring buffer while it is
being resized. I have discovered that the tracers do not honor this
arrangement.

This patch moves the record disabling and the synchronize_sched() call
up a layer, into ring_buffer_resize() itself. This guarantees that no
writes occur while the resize takes place.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
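
The patch's approach, in outline: bump record_disabled so new writers skip the
buffer, wait one scheduling grace period so in-flight writers (which record from
preempt-disabled context) drain, perform the resize, then re-enable recording.
Below is a minimal, hypothetical sketch of that ordering. demo_buffer and
demo_resize() are illustrative stand-ins built from the 2009-era primitives named
in the diff; they are not the actual ring buffer code.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

/* Illustrative stand-in; the real buffer has per-cpu structures, pages, etc. */
struct demo_buffer {
	atomic_t record_disabled;	/* writers skip recording when non-zero */
	struct mutex mutex;		/* serializes resizers */
};

static void demo_resize(struct demo_buffer *buffer)
{
	/* Tell writers to stop recording into this buffer. */
	atomic_inc(&buffer->record_disabled);

	/*
	 * Writers record from preempt-disabled context, so after one
	 * scheduling grace period no writer can still be mid-write.
	 */
	synchronize_sched();

	mutex_lock(&buffer->mutex);
	/*
	 * ... add or remove buffer pages here, as rb_insert_pages() and
	 * rb_remove_pages() do, without racing against writers ...
	 */
	mutex_unlock(&buffer->mutex);

	/* Re-enable recording. */
	atomic_dec(&buffer->record_disabled);
}
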
Steven Rostedt authored and committed on Dec 11, 2009
1 parent d954fbf commit 1842101
Showing 1 changed file with 9 additions and 16 deletions.

kernel/trace/ring_buffer.c
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1214,9 +1211,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
-
 }
 
 static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1245,20 +1236,13 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
-
-	atomic_dec(&cpu_buffer->record_disabled);
 }
 
 /**
  * ring_buffer_resize - resize the ring buffer
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
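
Why a single synchronize_sched() makes the resize safe: ring buffer writes are
performed from preempt-disabled context and are skipped once record_disabled is
non-zero. The function below is a simplified, hypothetical writer illustrating
that pairing; demo_write() and demo_buffer are the stand-ins from the earlier
sketch, not the kernel's actual ring_buffer_lock_reserve() path.

#include <linux/preempt.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/atomic.h>

/* Hypothetical writer; pairs with demo_resize() in the sketch above. */
static int demo_write(struct demo_buffer *buffer, const void *data, size_t len)
{
	int ret = -EBUSY;

	/* No preemption here, so a resizer's synchronize_sched() waits for us. */
	preempt_disable_notrace();

	/* A resizer increments record_disabled before waiting; bail out if set. */
	if (atomic_read(&buffer->record_disabled))
		goto out;

	/* ... reserve space on this CPU's pages and copy @data in ... */
	ret = 0;
 out:
	preempt_enable_notrace();
	return ret;
}

Once ring_buffer_resize() performs this disable/synchronize/re-enable sequence
itself, tracers no longer need to quiesce the buffer before resizing, which is
why that requirement was dropped from the kernel-doc comment in the diff above.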
