Skip to content

Commit

Permalink
tracing: Have trace_array keep track if snapshot buffer is allocated
Browse files Browse the repository at this point in the history
The snapshot buffer belongs to the trace array not the tracer that is
running. The trace array should be the data structure that keeps track
of whether or not the snapshot buffer is allocated, not the tracer
descriptor. Having the trace array keep track of it makes modifications
so much easier.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
  • Loading branch information
Steven Rostedt (Red Hat) authored and Steven Rostedt committed Mar 15, 2013
1 parent 6de58e6 commit 45ad21c
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 18 deletions.
32 changes: 15 additions & 17 deletions kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -667,7 +667,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)

WARN_ON_ONCE(!irqs_disabled());

if (!tr->current_trace->allocated_snapshot) {
if (!tr->allocated_snapshot) {
/* Only the nop tracer should hit this when disabling */
WARN_ON_ONCE(tr->current_trace != &nop_trace);
return;
Expand Down Expand Up @@ -700,7 +700,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;

WARN_ON_ONCE(!irqs_disabled());
if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot))
if (WARN_ON_ONCE(!tr->allocated_snapshot))
return;

arch_spin_lock(&ftrace_max_lock);
Expand Down Expand Up @@ -802,7 +802,7 @@ int register_tracer(struct tracer *type)
if (ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
type->allocated_snapshot = true;
tr->allocated_snapshot = true;
}
#endif

Expand All @@ -822,7 +822,7 @@ int register_tracer(struct tracer *type)

#ifdef CONFIG_TRACER_MAX_TRACE
if (type->use_max_tr) {
type->allocated_snapshot = false;
tr->allocated_snapshot = false;

/* Shrink the max buffer again */
if (ring_buffer_expanded)
Expand Down Expand Up @@ -2463,7 +2463,7 @@ static void show_snapshot_percpu_help(struct seq_file *m)

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
if (iter->trace->allocated_snapshot)
if (iter->tr->allocated_snapshot)
seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
else
seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
Expand Down Expand Up @@ -3364,12 +3364,12 @@ static int tracing_set_tracer(const char *buf)
if (tr->current_trace->reset)
tr->current_trace->reset(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
had_max_tr = tr->current_trace->allocated_snapshot;

/* Current trace needs to be nop_trace before synchronize_sched */
tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
had_max_tr = tr->allocated_snapshot;

if (had_max_tr && !t->use_max_tr) {
/*
* We need to make sure that the update_max_tr sees that
Expand All @@ -3387,10 +3387,8 @@ static int tracing_set_tracer(const char *buf)
ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer);
tr->current_trace->allocated_snapshot = false;
tr->allocated_snapshot = false;
}
#else
tr->current_trace = &nop_trace;
#endif
destroy_trace_option_files(topts);

Expand All @@ -3403,7 +3401,7 @@ static int tracing_set_tracer(const char *buf)
RING_BUFFER_ALL_CPUS);
if (ret < 0)
goto out;
t->allocated_snapshot = true;
tr->allocated_snapshot = true;
}
#endif

Expand Down Expand Up @@ -4275,13 +4273,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = -EINVAL;
break;
}
if (tr->current_trace->allocated_snapshot) {
if (tr->allocated_snapshot) {
/* free spare buffer */
ring_buffer_resize(tr->max_buffer.buffer, 1,
RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer);
tr->current_trace->allocated_snapshot = false;
tr->allocated_snapshot = false;
}
break;
case 1:
Expand All @@ -4292,13 +4290,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
break;
}
#endif
if (!tr->current_trace->allocated_snapshot) {
if (!tr->allocated_snapshot) {
/* allocate spare buffer */
ret = resize_buffer_duplicate_size(&tr->max_buffer,
&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
if (ret < 0)
break;
tr->current_trace->allocated_snapshot = true;
tr->allocated_snapshot = true;
}
local_irq_disable();
/* Now, we're going to swap */
Expand All @@ -4309,7 +4307,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
local_irq_enable();
break;
default:
if (tr->current_trace->allocated_snapshot) {
if (tr->allocated_snapshot) {
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(&tr->max_buffer);
else
Expand Down
2 changes: 1 addition & 1 deletion kernel/trace/trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,7 @@ struct trace_array {
* the trace_buffer so the tracing can continue.
*/
struct trace_buffer max_buffer;
bool allocated_snapshot;
#endif
int buffer_disabled;
struct trace_cpu trace_cpu; /* place holder */
Expand Down Expand Up @@ -367,7 +368,6 @@ struct tracer {
bool enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
bool use_max_tr;
bool allocated_snapshot;
#endif
};

Expand Down

0 comments on commit 45ad21c

Please sign in to comment.