ftrace: add trace_function api for other tracers to use
A new check was added to the ftrace function so that it won't trace when the
per-CPU trace buffer is disabled. Unfortunately, other tracers used ftrace()
to write to the buffer after they had disabled it; the new disable check turns
those calls into nops.

This patch turns the unchecked __ftrace() into a new API for the other
tracers to use, called "trace_function". The other tracers call this
interface instead when the per-CPU trace buffer is already disabled.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Steven Rostedt authored and Thomas Gleixner committed May 23, 2008
1 parent 2a2cc8f commit 6fb44b7
Showing 4 changed files with 17 additions and 11 deletions.
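
For context, the calling pattern the converted tracers follow looks like the
sketch below, modeled on the function_trace_call() and irqsoff_tracer_call()
hunks that follow; the example_trace and example_tracer_call names are
hypothetical, not part of this commit. Because the tracer bumps
data->disabled itself, it must record through trace_function() directly: the
checked ftrace() entry point would see the buffer as disabled and silently do
nothing.

/* Minimal sketch only: example_trace/example_tracer_call are made up. */
static struct trace_array *example_trace;

static void notrace
example_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = example_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        /* Disable this CPU's buffer; from here on, ftrace() would be a nop. */
        disabled = atomic_inc_return(&data->disabled);

        /* Record only at the first disable level (guards against recursion). */
        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}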
8 changes: 4 additions & 4 deletions kernel/trace/trace.c
@@ -641,8 +641,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 }
 
 notrace void
-__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-         unsigned long ip, unsigned long parent_ip, unsigned long flags)
+trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+               unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
         struct trace_entry *entry;
         unsigned long irq_flags;
@@ -664,7 +664,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
         if (likely(!atomic_read(&data->disabled)))
-                __ftrace(tr, data, ip, parent_ip, flags);
+                trace_function(tr, data, ip, parent_ip, flags);
 }
 
 notrace void
@@ -730,7 +730,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1))
-                __ftrace(tr, data, ip, parent_ip, flags);
+                trace_function(tr, data, ip, parent_ip, flags);
 
         atomic_dec(&data->disabled);
         local_irq_restore(flags);
5 changes: 5 additions & 0 deletions kernel/trace/trace.h
@@ -169,6 +169,11 @@ void trace_special(struct trace_array *tr,
                    unsigned long arg1,
                    unsigned long arg2,
                    unsigned long arg3);
+void trace_function(struct trace_array *tr,
+                    struct trace_array_cpu *data,
+                    unsigned long ip,
+                    unsigned long parent_ip,
+                    unsigned long flags);
 
 void tracing_start_function_trace(void);
 void tracing_stop_function_trace(void);
10 changes: 5 additions & 5 deletions kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1))
-                ftrace(tr, data, ip, parent_ip, flags);
+                trace_function(tr, data, ip, parent_ip, flags);
 
         atomic_dec(&data->disabled);
 }
@@ -150,7 +150,7 @@ check_critical_timing(struct trace_array *tr,
         if (!report_latency(delta))
                 goto out_unlock;
 
-        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 
         latency = nsecs_to_usecs(delta);
 
@@ -188,7 +188,7 @@ check_critical_timing(struct trace_array *tr,
         data->critical_sequence = max_sequence;
         data->preempt_timestamp = ftrace_now(cpu);
         tracing_reset(data);
-        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
 static inline void notrace
@@ -221,7 +221,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
         local_save_flags(flags);
 
-        ftrace(tr, data, ip, parent_ip, flags);
+        trace_function(tr, data, ip, parent_ip, flags);
 
         __get_cpu_var(tracing_cpu) = 1;
 
@@ -254,7 +254,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 
         atomic_inc(&data->disabled);
         local_save_flags(flags);
-        ftrace(tr, data, ip, parent_ip, flags);
+        trace_function(tr, data, ip, parent_ip, flags);
         check_critical_timing(tr, data, parent_ip ? : ip, cpu);
         data->critical_start = 0;
         atomic_dec(&data->disabled);
5 changes: 3 additions & 2 deletions kernel/trace/trace_sched_wakeup.c
@@ -85,7 +85,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
         if (unlikely(!tracer_enabled || next != wakeup_task))
                 goto out_unlock;
 
-        ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+        trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
 
         /*
          * usecs conversion is slow so we try to delay the conversion
@@ -192,7 +192,8 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
         local_save_flags(flags);
 
         tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-        ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
+        trace_function(tr, tr->data[wakeup_cpu],
+                       CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
         spin_unlock(&wakeup_lock);
