tracing: Remove ftrace_disable/enable_cpu()

The ftrace_disable_cpu() and ftrace_enable_cpu() functions were
needed back before the ring buffer was lockless. Now that the
ring buffer is lockless (and has been for some time), these functions
serve no purpose, and unnecessarily slow down operations of the tracer.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
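
For readers unfamiliar with what these helpers did: they implemented a small recursion guard, bumping a per-CPU counter with preemption disabled so that the tracing fast path would skip writing into a ring buffer that was being swapped, reset, or read. The sketch below is a minimal userspace model of that pattern, not the kernel code; a plain global and printf stand in for the kernel's per-CPU counter, preemption control, and trace events.

#include <stdio.h>

/* Stand-in for DEFINE_PER_CPU(int, ftrace_cpu_disabled); the kernel keeps one counter per CPU. */
static int ftrace_cpu_disabled;

static void ftrace_disable_cpu(void)
{
	/* Kernel version (from the diff): preempt_disable(); __this_cpu_inc(ftrace_cpu_disabled); */
	ftrace_cpu_disabled++;
}

static void ftrace_enable_cpu(void)
{
	/* Kernel version (from the diff): __this_cpu_dec(ftrace_cpu_disabled); preempt_enable(); */
	ftrace_cpu_disabled--;
}

/* Models the kind of check the tracing path made against the counter before writing an event. */
static void trace_event(const char *msg)
{
	if (ftrace_cpu_disabled) {
		printf("dropped:  %s\n", msg);
		return;
	}
	printf("recorded: %s\n", msg);
}

int main(void)
{
	trace_event("normal path");

	ftrace_disable_cpu();		/* e.g. around a per-CPU ring buffer reset */
	trace_event("during reset");	/* skipped while the guard is up */
	ftrace_enable_cpu();

	trace_event("after reset");
	return 0;
}

With the lockless ring buffer, the swap, reset, and read paths no longer need this guard, so the counter bump and the preempt_disable()/preempt_enable() pair around it are pure overhead; that is what this patch strips out.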
Steven Rostedt authored and Steven Rostedt committed May 9, 2012
1 parent 50e18b9 commit 6817968
Showing 1 changed file with 2 additions and 42 deletions.
44 changes: 2 additions & 42 deletions kernel/trace/trace.c
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
 
 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
-static inline void ftrace_disable_cpu(void)
-{
-	preempt_disable();
-	__this_cpu_inc(ftrace_cpu_disabled);
-}
-
-static inline void ftrace_enable_cpu(void)
-{
-	__this_cpu_dec(ftrace_cpu_disabled);
-	preempt_enable();
-}
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
@@ -748,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
-	ftrace_disable_cpu();
-
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
 	if (ret == -EBUSY) {
@@ -763,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 			"Failed to swap buffers due to commit in progress\n");
 	}
 
-	ftrace_enable_cpu();
-
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
@@ -916,13 +900,6 @@ void unregister_tracer(struct tracer *type)
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct ring_buffer *buffer, int cpu)
-{
-	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(buffer, cpu);
-	ftrace_enable_cpu();
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -931,7 +908,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -949,7 +926,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(buffer, cpu);
+		ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -1733,14 +1710,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	iter->idx++;
 	if (iter->buffer_iter[iter->cpu])
 		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-
-	ftrace_enable_cpu();
 }
 
 static struct trace_entry *
@@ -1750,17 +1722,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
 
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
 		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);
 
-	ftrace_enable_cpu();
-
 	if (event) {
 		iter->ent_size = ring_buffer_event_length(event);
 		return ring_buffer_event_data(event);
@@ -1850,11 +1817,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
 			    &iter->lost_events);
-	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1943,16 +1907,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	iter->cpu = 0;
 	iter->idx = -1;
 
-	ftrace_disable_cpu();
-
 	if (cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu)
 			tracing_iter_reset(iter, cpu);
 	} else
 		tracing_iter_reset(iter, cpu_file);
 
-	ftrace_enable_cpu();
-
 	iter->leftover = 0;
 	for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 		;
