Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 140637
b: refs/heads/master
c: bb3c3c9
h: refs/heads/master
i:
  140635: a18d47b
v: v3
  • Loading branch information
Steven Rostedt authored and Ingo Molnar committed Jan 16, 2009
1 parent 2804336 commit cfda67d
Show file tree
Hide file tree
Showing 3 changed files with 84 additions and 86 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 5361499101306cfb776c3cfa0f69d0479bc63868
refs/heads/master: bb3c3c95f330f7bf16e33b002e48882616089db1
84 changes: 0 additions & 84 deletions trunk/kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -1046,65 +1046,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Function-entry tracer callback, "preempt-only" variant: protects the
 * per-cpu buffer by disabling preemption (via ftrace_preempt_disable())
 * rather than disabling interrupts.
 *
 * ip:        address of the traced function
 * parent_ip: return address (the caller of the traced function)
 */
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu, resched;
int pc;

/* Bail out fast while tracing is being switched on/off. */
if (unlikely(!ftrace_function_enabled))
return;

/* Sample preempt count before we disable preemption ourselves. */
pc = preempt_count();
resched = ftrace_preempt_disable();
local_save_flags(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
/* ->disabled acts as a per-cpu nesting guard against recursion. */
disabled = atomic_inc_return(&data->disabled);

/* Only record when we are the sole writer on this cpu. */
if (likely(disabled == 1))
trace_function(tr, data, ip, parent_ip, flags, pc);

atomic_dec(&data->disabled);
/* Re-enable preemption, rescheduling only if we took it away. */
ftrace_preempt_enable(resched);
}

/*
 * Default function-entry tracer callback: records (ip, parent_ip) into the
 * per-cpu trace buffer with interrupts disabled for the full duration.
 *
 * ip:        address of the traced function
 * parent_ip: return address (the caller of the traced function)
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;

/* Bail out fast while tracing is being switched on/off. */
if (unlikely(!ftrace_function_enabled))
return;

/*
 * Need to use raw, since this must be called before the
 * recursive protection is performed.
 */
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
/* ->disabled acts as a per-cpu nesting guard against recursion. */
disabled = atomic_inc_return(&data->disabled);

/* Only record when we are the sole writer on this cpu. */
if (likely(disabled == 1)) {
pc = preempt_count();
trace_function(tr, data, ip, parent_ip, flags, pc);
}

atomic_dec(&data->disabled);
local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
Expand Down Expand Up @@ -1162,31 +1103,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * ftrace hook for the function tracer.  .func defaults to the irq-safe
 * callback and is switched by tracing_start_function_trace() according
 * to the trace flags.
 */
static struct ftrace_ops trace_ops __read_mostly = {
.func = function_trace_call,
};

/*
 * Enable the function tracer: select the callback variant that matches the
 * current trace flags, hook it into ftrace, then open the enabled gate so
 * the callbacks start recording.
 */
void tracing_start_function_trace(void)
{
/* Keep callbacks inert while the handler is being swapped in. */
ftrace_function_enabled = 0;

trace_ops.func = (trace_flags & TRACE_ITER_PREEMPTONLY) ?
function_trace_call_preempt_only :
function_trace_call;

register_ftrace_function(&trace_ops);
ftrace_function_enabled = 1;
}

/*
 * Disable the function tracer: close the enabled gate first so any in-flight
 * callbacks bail out early, then unhook from ftrace.
 */
void tracing_stop_function_trace(void)
{
ftrace_function_enabled = 0;
unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
Expand Down
84 changes: 83 additions & 1 deletion trunk/kernel/trace/trace_functions.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ static struct trace_array *func_trace;

static void start_function_trace(struct trace_array *tr)
{
func_trace = tr;
tr->cpu = get_cpu();
tracing_reset_online_cpus(tr);
put_cpu();
Expand All @@ -36,7 +37,6 @@ static void stop_function_trace(struct trace_array *tr)

/*
 * Tracer init callback, invoked when the function tracer is selected.
 * Caches the trace_array for the tracer callbacks and kicks off tracing.
 * Always succeeds (returns 0).
 */
static int function_trace_init(struct trace_array *tr)
{
func_trace = tr;
start_function_trace(tr);
return 0;
}
Expand All @@ -51,6 +51,64 @@ static void function_trace_start(struct trace_array *tr)
tracing_reset_online_cpus(tr);
}

/*
 * Function-entry tracer callback, "preempt-only" variant: protects the
 * per-cpu buffer by disabling preemption (via ftrace_preempt_disable())
 * rather than disabling interrupts.
 *
 * ip:        address of the traced function
 * parent_ip: return address (the caller of the traced function)
 */
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
/* func_trace is cached by start_function_trace()/function_trace_init(). */
struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu, resched;
int pc;

/* Bail out fast while tracing is being switched on/off. */
if (unlikely(!ftrace_function_enabled))
return;

/* Sample preempt count before we disable preemption ourselves. */
pc = preempt_count();
resched = ftrace_preempt_disable();
local_save_flags(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
/* ->disabled acts as a per-cpu nesting guard against recursion. */
disabled = atomic_inc_return(&data->disabled);

/* Only record when we are the sole writer on this cpu. */
if (likely(disabled == 1))
trace_function(tr, data, ip, parent_ip, flags, pc);

atomic_dec(&data->disabled);
/* Re-enable preemption, rescheduling only if we took it away. */
ftrace_preempt_enable(resched);
}

/*
 * Default function-entry tracer callback: records (ip, parent_ip) into the
 * per-cpu trace buffer with interrupts disabled for the full duration.
 *
 * ip:        address of the traced function
 * parent_ip: return address (the caller of the traced function)
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
/* func_trace is cached by start_function_trace()/function_trace_init(). */
struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;

/* Bail out fast while tracing is being switched on/off. */
if (unlikely(!ftrace_function_enabled))
return;

/*
 * Need to use raw, since this must be called before the
 * recursive protection is performed.
 */
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
/* ->disabled acts as a per-cpu nesting guard against recursion. */
disabled = atomic_inc_return(&data->disabled);

/* Only record when we are the sole writer on this cpu. */
if (likely(disabled == 1)) {
pc = preempt_count();
trace_function(tr, data, ip, parent_ip, flags, pc);
}

atomic_dec(&data->disabled);
local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
Expand Down Expand Up @@ -90,6 +148,30 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
local_irq_restore(flags);
}


/*
 * ftrace hook for the function tracer.  .func defaults to the irq-safe
 * callback and is switched by tracing_start_function_trace() according
 * to the trace flags.
 */
static struct ftrace_ops trace_ops __read_mostly = {
.func = function_trace_call,
};

/*
 * Enable the function tracer: select the callback variant that matches the
 * current trace flags, hook it into ftrace, then open the enabled gate so
 * the callbacks start recording.
 */
void tracing_start_function_trace(void)
{
/* Keep callbacks inert while the handler is being swapped in. */
ftrace_function_enabled = 0;

trace_ops.func = (trace_flags & TRACE_ITER_PREEMPTONLY) ?
function_trace_call_preempt_only :
function_trace_call;

register_ftrace_function(&trace_ops);
ftrace_function_enabled = 1;
}

/*
 * Disable the function tracer: close the enabled gate first so any in-flight
 * callbacks bail out early, then unhook from ftrace.
 */
void tracing_stop_function_trace(void)
{
ftrace_function_enabled = 0;
unregister_ftrace_function(&trace_ops);
}
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,
Expand Down

0 comments on commit cfda67d

Please sign in to comment.