Commit 4d94fe6

---
---
r: 121063
b: refs/heads/master
c: 79c81d2
h: refs/heads/master
i:
  121061: c1075a4
  121059: 65078b2
  121055: dc5d475
v: v3
Ingo Molnar committed Nov 6, 2008
1 parent 893f7e2 commit 4d94fe6
Showing 8 changed files with 122 additions and 45 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
- refs/heads/master: 79a9d461fd521f133f0e66485aa9ed09c21f5191
+ refs/heads/master: 79c81d220c8e25163f56edcdfaf23f83a4c88e6b
18 changes: 17 additions & 1 deletion trunk/drivers/char/sysrq.c
@@ -274,6 +274,22 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};

#ifdef CONFIG_TRACING
#include <linux/ftrace.h>

static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
{
ftrace_dump();
}
static struct sysrq_key_op sysrq_ftrace_dump_op = {
.handler = sysrq_ftrace_dump,
.help_msg = "dumpZ-ftrace-buffer",
.action_msg = "Dump ftrace buffer",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
#else
#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0)
#endif

static void sysrq_handle_showmem(int key, struct tty_struct *tty)
{
@@ -406,7 +422,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
NULL, /* x */
/* y: May be registered on sparc64 for global register dump */
NULL, /* y */
- NULL /* z */
+ &sysrq_ftrace_dump_op, /* z */
};

/* key2index calculation, -1 on invalid index */
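The hunks above wire the new handler into the static sysrq_key_table slot for 'z'. For comparison, here is a minimal, purely hypothetical out-of-tree sketch (not part of this commit) of the same struct sysrq_key_op pattern hooked up through the runtime registration API instead; the key choice and handler body are illustrative assumptions only:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>

/* hypothetical handler: just log the key rather than dumping the ftrace buffer */
static void demo_sysrq_handler(int key, struct tty_struct *tty)
{
	printk(KERN_INFO "demo sysrq handler invoked for key '%c'\n", key);
}

static struct sysrq_key_op demo_sysrq_op = {
	.handler	= demo_sysrq_handler,
	.help_msg	= "demo-handler(X)",
	.action_msg	= "Run demo sysrq handler",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};

static int __init demo_sysrq_init(void)
{
	/* 'x' is assumed free here; register_sysrq_key() fails if the slot is taken */
	return register_sysrq_key('x', &demo_sysrq_op);
}

static void __exit demo_sysrq_exit(void)
{
	unregister_sysrq_key('x', &demo_sysrq_op);
}

module_init(demo_sysrq_init);
module_exit(demo_sysrq_exit);
MODULE_LICENSE("GPL");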
2 changes: 1 addition & 1 deletion trunk/kernel/sysctl.c
@@ -487,7 +487,7 @@ static struct ctl_table kern_table[] = {
#ifdef CONFIG_TRACING
{
.ctl_name = CTL_UNNUMBERED,
.procname = "ftrace_dump_on_opps",
.procname = "ftrace_dump_on_oops",
.data = &ftrace_dump_on_oops,
.maxlen = sizeof(int),
.mode = 0644,
27 changes: 9 additions & 18 deletions trunk/kernel/trace/ring_buffer.c
@@ -16,6 +16,8 @@
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
return NULL;

/* If we are tracing schedule, we don't want to recurse */
- resched = need_resched();
- preempt_disable_notrace();
+ resched = ftrace_preempt_disable();

cpu = raw_smp_processor_id();

@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
return event;

out:
- if (resched)
- preempt_enable_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
return NULL;
}

@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
/*
* Only the last preempt count needs to restore preemption.
*/
- if (preempt_count() == 1) {
- if (per_cpu(rb_need_resched, cpu))
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
- } else
+ if (preempt_count() == 1)
+ ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+ else
preempt_enable_no_resched_notrace();

return 0;
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
if (atomic_read(&buffer->record_disabled))
return -EBUSY;

- resched = need_resched();
- preempt_disable_notrace();
+ resched = ftrace_preempt_disable();

cpu = raw_smp_processor_id();

@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,

ret = 0;
out:
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);

return ret;
}
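Taken together, the ring_buffer.c hunks replace each open-coded need_resched()/preempt_disable_notrace() pair with the new ftrace_preempt_disable()/ftrace_preempt_enable() helpers; the #include "trace.h" added at the top of the file is what makes those static inline helpers (defined in the trace.h hunk further down) visible here.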
48 changes: 41 additions & 7 deletions trunk/kernel/trace/trace.c
@@ -244,6 +244,7 @@ static const char *trace_options[] = {
"stacktrace",
"sched-tree",
"ftrace_printk",
"ftrace_preempt",
NULL
};

@@ -891,7 +892,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)

#ifdef CONFIG_FUNCTION_TRACER
static void
- function_trace_call(unsigned long ip, unsigned long parent_ip)
+ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
@@ -904,8 +905,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
return;

pc = preempt_count();
- resched = need_resched();
- preempt_disable_notrace();
+ resched = ftrace_preempt_disable();
local_save_flags(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
@@ -915,10 +915,38 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
trace_function(tr, data, ip, parent_ip, flags, pc);

atomic_dec(&data->disabled);
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;

if (unlikely(!ftrace_function_enabled))
return;

/*
* Need to use raw, since this must be called before the
* recursive protection is performed.
*/
raw_local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);

if (likely(disabled == 1)) {
pc = preempt_count();
trace_function(tr, data, ip, parent_ip, flags, pc);
}

atomic_dec(&data->disabled);
raw_local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
@@ -929,6 +957,12 @@ static struct ftrace_ops trace_ops __read_mostly =
void tracing_start_function_trace(void)
{
ftrace_function_enabled = 0;

if (trace_flags & TRACE_ITER_PREEMPTONLY)
trace_ops.func = function_trace_call_preempt_only;
else
trace_ops.func = function_trace_call;

register_ftrace_function(&trace_ops);
if (tracer_enabled)
ftrace_function_enabled = 1;
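In trace.c the existing preemption-only callback is kept under the new name function_trace_call_preempt_only(), while the new default function_trace_call() disables interrupts with raw_local_irq_save() so it can run before the tracer's recursive protection is performed (as its comment notes). The "ftrace_preempt" string appended to trace_options[] selects between them: the option strings are matched positionally against the TRACE_ITER_* bits, so this entry pairs with the TRACE_ITER_PREEMPTONLY (0x800) flag added in trace.h below, and tracing_start_function_trace() then picks the corresponding callback.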
49 changes: 49 additions & 0 deletions trunk/kernel/trace/trace.h
@@ -416,8 +416,57 @@ enum trace_iterator_flags {
TRACE_ITER_STACKTRACE = 0x100,
TRACE_ITER_SCHED_TREE = 0x200,
TRACE_ITER_PRINTK = 0x400,
TRACE_ITER_PREEMPTONLY = 0x800,
};

extern struct tracer nop_trace;

/**
* ftrace_preempt_disable - disable preemption scheduler safe
*
* When tracing can happen inside the scheduler, there exist
* cases where the tracing might happen before the need_resched
* flag is checked. If this happens and the tracer calls
* preempt_enable (after a disable), a schedule might take place,
* causing infinite recursion.
*
* To prevent this, we read the need_resched flag before
* disabling preemption. When we want to enable preemption, we
* check the flag; if it is set, we call preempt_enable_no_resched.
* Otherwise, we call preempt_enable.
*
* The rationale for doing the above is that if need_resched is set
* and we have yet to reschedule, we are either in an atomic location
* (where we do not need to check for scheduling) or we are inside
* the scheduler and do not want to resched.
*/
static inline int ftrace_preempt_disable(void)
{
int resched;

resched = need_resched();
preempt_disable_notrace();

return resched;
}

/**
* ftrace_preempt_enable - enable preemption scheduler safe
* @resched: the return value from ftrace_preempt_disable
*
* This is a scheduler-safe way to enable preemption without missing
* any preemption checks. The matching disable saved the state of preemption.
* If resched is set, then we were either inside an atomic section or
* inside the scheduler (we would have already scheduled otherwise).
* In this case, we do not want to call the normal preempt_enable,
* but preempt_enable_no_resched instead.
*/
static inline void ftrace_preempt_enable(int resched)
{
if (resched)
preempt_enable_no_resched_notrace();
else
preempt_enable_notrace();
}

#endif /* _LINUX_KERNEL_TRACE_H */
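The two helpers above are the heart of this commit. For reference, a minimal sketch of the pairing they are designed for, assuming "trace.h" is included and using a hypothetical tracer callback (the name and body are illustrative, not from this commit):

static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	/* save the need_resched state and disable preemption, trace-safe */
	resched = ftrace_preempt_disable();

	/* ... record the event; nothing here may call back into the scheduler ... */

	/* re-enable preemption without forcing a reschedule from the tracer */
	ftrace_preempt_enable(resched);
}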
13 changes: 2 additions & 11 deletions trunk/kernel/trace/trace_sched_wakeup.c
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
return;

pc = preempt_count();
- resched = need_resched();
- preempt_disable_notrace();
+ resched = ftrace_preempt_disable();

cpu = raw_smp_processor_id();
data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
out:
atomic_dec(&data->disabled);

- /*
- * To prevent recursion from the scheduler, if the
- * resched flag was set before we entered, then
- * don't reschedule.
- */
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
8 changes: 2 additions & 6 deletions trunk/kernel/trace/trace_stack.c
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
if (unlikely(!ftrace_enabled || stack_trace_disabled))
return;

- resched = need_resched();
- preempt_disable_notrace();
+ resched = ftrace_preempt_disable();

cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
out:
per_cpu(trace_active, cpu)--;
/* prevent recursion in schedule */
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
