ftrace: move function tracer functions out of trace.c
Impact: cleanup of trace.c

The function tracer functions were originally placed in trace.c because they
needed to share static variables defined there. Since then, those variables
have become global for various reasons. This patch moves the function tracer
functions into trace_functions.c, where they belong.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Steven Rostedt authored and Ingo Molnar committed Jan 16, 2009
1 parent 5361499 commit bb3c3c9
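
For readers following along, the code being moved uses the standard ftrace
registration flow: a tracer fills in a struct ftrace_ops with its callback and
hands it to register_ftrace_function(). Below is a minimal sketch of that flow,
assuming a kernel of this era; my_callback, my_ops, my_tracer_start, and
my_tracer_stop are hypothetical names, while the two-argument callback
signature and the register/unregister calls are taken from the diff itself:

#include <linux/ftrace.h>

/* Hypothetical callback: invoked for (nearly) every traced function entry.
 * ip is the traced function, parent_ip its call site. */
static void my_callback(unsigned long ip, unsigned long parent_ip)
{
        /* keep this path short; it runs on every traced call */
}

static struct ftrace_ops my_ops __read_mostly =
{
        .func = my_callback,
};

static void my_tracer_start(void)
{
        register_ftrace_function(&my_ops);      /* begin receiving callbacks */
}

static void my_tracer_stop(void)
{
        unregister_ftrace_function(&my_ops);    /* stop receiving callbacks */
}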
Showing 2 changed files with 83 additions and 85 deletions.
84 changes: 0 additions & 84 deletions kernel/trace/trace.c
@@ -1046,65 +1046,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, data, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
@@ -1162,31 +1103,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        register_ftrace_function(&trace_ops);
        ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
        TRACE_FILE_LAT_FMT = 1,
        TRACE_FILE_ANNOTATE = 2,
84 changes: 83 additions & 1 deletion kernel/trace/trace_functions.c
@@ -20,6 +20,7 @@ static struct trace_array *func_trace;

static void start_function_trace(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        tracing_reset_online_cpus(tr);
        put_cpu();
@@ -36,7 +37,6 @@ static void stop_function_trace(struct trace_array *tr)

static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        start_function_trace(tr);
        return 0;
}
@@ -51,6 +51,64 @@ static void function_trace_start(struct trace_array *tr)
        tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, data, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
@@ -90,6 +148,30 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
        local_irq_restore(flags);
}


static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        register_ftrace_function(&trace_ops);
        ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}
static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
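
One detail worth noting in the relocated callbacks (an aside, not part of the
patch): the per-CPU data->disabled counter doubles as a recursion guard.
atomic_inc_return() bumps it on entry, and the event is recorded only when the
result is 1, so if recording itself re-enters the callback on the same CPU, the
nested call sees a count above 1 and backs out. A stripped-down sketch of the
idea, with record_event() as a hypothetical stand-in for trace_function() and a
single counter standing in for the per-CPU one:

#include <asm/atomic.h>         /* <linux/atomic.h> on later kernels */

static atomic_t disabled;       /* per CPU (data->disabled) in the real code */

static void guarded_call(unsigned long ip, unsigned long parent_ip)
{
        long count = atomic_inc_return(&disabled);

        if (count == 1)                         /* outermost entry only */
                record_event(ip, parent_ip);    /* hypothetical recorder */

        atomic_dec(&disabled);
}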
