Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 211957
b: refs/heads/master
c: 5e6d2b9
h: refs/heads/master
i:
  211955: 08b2f80
v: v3
  • Loading branch information
Steven Rostedt authored and Steven Rostedt committed Oct 18, 2010
1 parent 22f439e commit 140f27e
Show file tree
Hide file tree
Showing 2 changed files with 49 additions and 49 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 542181d3769d001c59cd17573dd4381e87d215f2
refs/heads/master: 5e6d2b9cfa3a6e7fe62fc0135bc1bd778f5db564
96 changes: 48 additions & 48 deletions trunk/kernel/trace/trace_irqsoff.c
Original file line number Diff line number Diff line change
Expand Up @@ -87,14 +87,22 @@ static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
* Prologue for the preempt and irqs off function tracers.
*
* Returns 1 if it is OK to continue, and data->disabled is
* incremented.
* 0 if the trace is to be ignored, and data->disabled
* is kept the same.
*
* Note, this function is also used outside this ifdef but
* inside the #ifdef of the function graph tracer below.
* This is OK, since the function graph tracer is
* dependent on the function tracer.
*/
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
static int func_prolog_dec(struct trace_array *tr,
struct trace_array_cpu **data,
unsigned long *flags)
{
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;

Expand All @@ -106,18 +114,38 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
*/
cpu = raw_smp_processor_id();
if (likely(!per_cpu(tracing_cpu, cpu)))
return;
return 0;

local_save_flags(flags);
local_save_flags(*flags);
/* slight chance to get a false positive on tracing_cpu */
if (!irqs_disabled_flags(flags))
return;
if (!irqs_disabled_flags(*flags))
return 0;

data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
*data = tr->data[cpu];
disabled = atomic_inc_return(&(*data)->disabled);

if (likely(disabled == 1))
trace_function(tr, ip, parent_ip, flags, preempt_count());
return 1;

atomic_dec(&(*data)->disabled);

return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	/*
	 * func_prolog_dec() returns 1 only when this CPU is actively
	 * tracing with irqs disabled; on success it has incremented
	 * data->disabled and filled in data and flags for us.
	 */
	if (!func_prolog_dec(tr, &data, &flags))
		return;

	/* Record this function call in the irqsoff trace buffer. */
	trace_function(tr, ip, parent_ip, flags, preempt_count());

	/* Balance the atomic_inc_return() done inside func_prolog_dec(). */
	atomic_dec(&data->disabled);
}
Expand Down Expand Up @@ -155,30 +183,16 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int ret;
int cpu;
int pc;

cpu = raw_smp_processor_id();
if (likely(!per_cpu(tracing_cpu, cpu)))
return 0;

local_save_flags(flags);
/* slight chance to get a false positive on tracing_cpu */
if (!irqs_disabled_flags(flags))
if (!func_prolog_dec(tr, &data, &flags))
return 0;

data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);

if (likely(disabled == 1)) {
pc = preempt_count();
ret = __trace_graph_entry(tr, trace, flags, pc);
} else
ret = 0;

pc = preempt_count();
ret = __trace_graph_entry(tr, trace, flags, pc);
atomic_dec(&data->disabled);

return ret;
}

Expand All @@ -187,27 +201,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;

cpu = raw_smp_processor_id();
if (likely(!per_cpu(tracing_cpu, cpu)))
return;

local_save_flags(flags);
/* slight chance to get a false positive on tracing_cpu */
if (!irqs_disabled_flags(flags))
if (!func_prolog_dec(tr, &data, &flags))
return;

data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);

if (likely(disabled == 1)) {
pc = preempt_count();
__trace_graph_return(tr, trace, flags, pc);
}

pc = preempt_count();
__trace_graph_return(tr, trace, flags, pc);
atomic_dec(&data->disabled);
}

Expand Down

0 comments on commit 140f27e

Please sign in to comment.