Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 349863
b: refs/heads/master
c: edc15ca
h: refs/heads/master
i:
  349861: 42835bb
  349859: cf243d0
  349855: da5fd17
v: v3
  • Loading branch information
Steven Rostedt authored and Steven Rostedt committed Jan 23, 2013
1 parent 4d1aa7c commit 8df8fd7
Show file tree
Hide file tree
Showing 3 changed files with 111 additions and 37 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: e46cbf75c621725964fe1f6e7013e8bcd86a0e3d
refs/heads/master: edc15cafcbfa3d73f819cae99885a2e35e4cbce5
40 changes: 9 additions & 31 deletions trunk/kernel/trace/ftrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -158,25 +158,15 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
{
int bit;

if (in_interrupt()) {
if (in_nmi())
bit = TRACE_GLOBAL_NMI_BIT;

else if (in_irq())
bit = TRACE_GLOBAL_IRQ_BIT;
else
bit = TRACE_GLOBAL_SIRQ_BIT;
} else
bit = TRACE_GLOBAL_BIT;

if (unlikely(trace_recursion_test(bit)))
bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
if (bit < 0)
return;

trace_recursion_set(bit);
do_for_each_ftrace_op(op, ftrace_global_list) {
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
trace_recursion_clear(bit);

trace_clear_recursion(bit);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
Expand Down Expand Up @@ -4145,26 +4135,14 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ignored, struct pt_regs *regs)
{
struct ftrace_ops *op;
unsigned int bit;
int bit;

if (function_trace_stop)
return;

if (in_interrupt()) {
if (in_nmi())
bit = TRACE_INTERNAL_NMI_BIT;

else if (in_irq())
bit = TRACE_INTERNAL_IRQ_BIT;
else
bit = TRACE_INTERNAL_SIRQ_BIT;
} else
bit = TRACE_INTERNAL_BIT;

if (unlikely(trace_recursion_test(bit)))
return;

trace_recursion_set(bit);
bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
if (bit < 0)
return;

/*
* Some of the ops may be dynamically allocated,
Expand All @@ -4176,7 +4154,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
preempt_enable_notrace();
trace_recursion_clear(bit);
trace_clear_recursion(bit);
}

/*
Expand Down
106 changes: 101 additions & 5 deletions trunk/kernel/trace/trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -297,18 +297,49 @@ struct tracer {
/* Ring buffer has the 10 LSB bits to count */
#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)

/* for function tracing recursion */
/*
* For function tracing recursion:
 * The order of these bits is important.
*
* When function tracing occurs, the following steps are made:
* If arch does not support a ftrace feature:
* call internal function (uses INTERNAL bits) which calls...
* If callback is registered to the "global" list, the list
* function is called and recursion checks the GLOBAL bits.
* then this function calls...
* The function callback, which can use the FTRACE bits to
* check for recursion.
*
 * Now if the arch does not support a feature, and it calls
* the global list function which calls the ftrace callback
* all three of these steps will do a recursion protection.
* There's no reason to do one if the previous caller already
* did. The recursion that we are protecting against will
* go through the same steps again.
*
* To prevent the multiple recursion checks, if a recursion
* bit is set that is higher than the MAX bit of the current
* check, then we know that the check was made by the previous
* caller, and we can skip the current check.
*/
enum {
TRACE_INTERNAL_BIT = 11,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
TRACE_FTRACE_BIT = 11,
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,

/* GLOBAL_BITs must be greater than FTRACE_BITs */
TRACE_GLOBAL_BIT,
TRACE_GLOBAL_NMI_BIT,
TRACE_GLOBAL_IRQ_BIT,
TRACE_GLOBAL_SIRQ_BIT,

/* INTERNAL_BITs must be greater than GLOBAL_BITs */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,

TRACE_CONTROL_BIT,

/*
Expand All @@ -325,6 +356,71 @@ enum {
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS 4

#define TRACE_FTRACE_START TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK TRACE_LIST_MAX

/*
 * Map the current execution context to a bit offset within a
 * recursion window: 0 = NMI, 1 = hard IRQ, 2 = softirq, 3 = normal.
 */
static __always_inline int trace_get_context_bit(void)
{
	if (!in_interrupt())
		return 3;	/* normal (process) context */

	if (in_nmi())
		return 0;

	if (in_irq())
		return 1;

	return 2;		/* softirq context */
}

/*
 * Test and set the per-context recursion bit for the window that
 * begins at @start (one of TRACE_*_START) and ends at @max (the
 * matching TRACE_*_MAX).
 *
 * Returns:
 *   >0  - the bit that was set; pass it to trace_clear_recursion()
 *    0  - a higher-level (outer) check already holds a recursion bit,
 *         so this level may proceed without setting (or clearing) one
 *   -1  - recursion detected in this context; the caller must bail out
 *
 * Only current's own task flags are touched, so no atomics are needed.
 */
static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	/* Offset into this window by the current context (NMI/IRQ/softirq/normal) */
	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	/* Keep the compiler from moving the protected region above the set */
	barrier();

	return bit;
}

/*
 * Release the recursion bit obtained from trace_test_and_set_recursion().
 * A @bit of 0 means no bit was taken (an outer level held one), so there
 * is nothing to clear. Valid recursion bits all start above bit 0.
 */
static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	/* Keep the compiler from moving the protected region below the clear */
	barrier();
	current->trace_recursion = val;
}

#define TRACE_PIPE_ALL_CPU -1

static inline struct ring_buffer_iter *
Expand Down

0 comments on commit 8df8fd7

Please sign in to comment.