Commit d626045
---
r: 146033
b: refs/heads/master
c: 261842b
h: refs/heads/master
i:
  146031: 86a1a24
v: v3
Steven Rostedt authored and Steven Rostedt committed Apr 17, 2009
1 parent 3368fc3 commit d626045
Showing 5 changed files with 54 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 12acd473d45cf2e40de3782cb2de712e5cd4d715
+refs/heads/master: 261842b7c9099f56de2eb969c8ad65402d68e00e
7 changes: 7 additions & 0 deletions trunk/include/linux/ftrace.h
@@ -488,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 
 extern int ftrace_dump_on_oops;
 
+#ifdef CONFIG_PREEMPT
+#define INIT_TRACE_RECURSION .trace_recursion = 0,
+#endif
+
 #endif /* CONFIG_TRACING */
 
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
+
 #ifdef CONFIG_HW_BRANCH_TRACER
 
1 change: 1 addition & 0 deletions trunk/include/linux/init_task.h
@@ -187,6 +187,7 @@ extern struct cred init_cred;
 	INIT_TRACE_IRQFLAGS \
 	INIT_LOCKDEP \
 	INIT_FTRACE_GRAPH \
+	INIT_TRACE_RECURSION \
 }
 
 
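For readers unfamiliar with the macro pattern added in ftrace.h and used in this initializer, here is a minimal standalone sketch (not part of the commit; struct task_example and its fields are illustrative) of how the empty INIT_TRACE_RECURSION fallback lets an INIT_TASK-style designated initializer compile whether or not the tracing field exists:

/* Define the initializer only when the field exists... */
#ifdef CONFIG_TRACING
#define INIT_TRACE_RECURSION .trace_recursion = 0,
#endif

/* ...and fall back to an empty expansion otherwise. */
#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

struct task_example {
#ifdef CONFIG_TRACING
	unsigned long trace_recursion;
#endif
	int pid;
};

/* Compiles with or without CONFIG_TRACING: the macro either contributes
 * one designated initializer or expands to nothing. */
static struct task_example init_task_example = {
	INIT_TRACE_RECURSION
	.pid = 0,
};
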
4 changes: 3 additions & 1 deletion trunk/include/linux/sched.h
@@ -1428,7 +1428,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-#endif
+	/* bitmask of trace recursion */
+	unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
42 changes: 42 additions & 0 deletions trunk/kernel/trace/ring_buffer.c
@@ -1481,6 +1481,40 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 }
 
+static int trace_irq_level(void)
+{
+	return (hardirq_count() >> HARDIRQ_SHIFT) + (softirq_count() >> SOFTIRQ_SHIFT) + !!in_nmi();
+}
+
+static int trace_recursive_lock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	if (unlikely(current->trace_recursion & (1 << level))) {
+		/* Disable all tracing before we do anything else */
+		tracing_off_permanent();
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	current->trace_recursion |= 1 << level;
+
+	return 0;
+}
+
+static void trace_recursive_unlock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	WARN_ON_ONCE(!(current->trace_recursion & (1 << level)));
+
+	current->trace_recursion &= ~(1 << level);
+}
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
@@ -1514,6 +1548,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (trace_recursive_lock())
+		goto out_nocheck;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -1543,6 +1580,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	return event;
 
 out:
+	trace_recursive_unlock();
+
+out_nocheck:
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
@@ -1581,6 +1621,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	rb_commit(cpu_buffer, event);
 
+	trace_recursive_unlock();
+
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
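To illustrate the recursion check in isolation, here is a minimal user-space sketch (not part of the commit) of the same idea: one bit of a per-task mask per context level, set on entry and cleared on exit, so a second entry at the same level is reported instead of being allowed to recurse. The explicit level argument stands in for trace_irq_level(), and printf() stands in for tracing_off_permanent()/WARN_ON_ONCE().

#include <stdio.h>

static unsigned long trace_recursion;	/* stands in for current->trace_recursion */

static int recursive_lock(int level)
{
	if (trace_recursion & (1UL << level)) {
		/* this context level is already inside the buffer code: refuse */
		printf("recursion detected at level %d\n", level);
		return -1;
	}
	trace_recursion |= 1UL << level;
	return 0;
}

static void recursive_unlock(int level)
{
	trace_recursion &= ~(1UL << level);
}

int main(void)
{
	if (recursive_lock(0) == 0)
		printf("entered at level 0\n");

	/* nesting from a different context level (e.g. an interrupt) is allowed */
	if (recursive_lock(1) == 0)
		printf("entered at level 1 while level 0 is held\n");

	/* a second entry at level 0 before its unlock is caught */
	recursive_lock(0);

	recursive_unlock(1);
	recursive_unlock(0);
	return 0;
}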
