Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 116603
b: refs/heads/master
c: e5a81b6
h: refs/heads/master
i:
  116601: b6cb9b4
  116599: cbb398e
v: v3
  • Loading branch information
Steven Rostedt authored and Ingo Molnar committed Oct 14, 2008
1 parent d9fe65b commit c06f07b
Show file tree
Hide file tree
Showing 4 changed files with 265 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: b3a320417484a6d6b9d28098944df58341353992
refs/heads/master: e5a81b629ea8feb9e7530cfac35cfb41c45facf3
9 changes: 9 additions & 0 deletions trunk/kernel/trace/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,15 @@ config CONTEXT_SWITCH_TRACER
This tracer gets called from the context switch and records
all switching of tasks.

config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FTRACE
select FTRACE
select STACKTRACE
help
This tracer records the max stack of the kernel, and displays
it in debugfs/tracing/stack_trace

config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
depends on FTRACE
Expand Down
1 change: 1 addition & 0 deletions trunk/kernel/trace/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ obj-$(CONFIG_FTRACE) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o

libftrace-y := ftrace.o
254 changes: 254 additions & 0 deletions trunk/kernel/trace/trace_stack.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,254 @@
/*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*
*/
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

/* Maximum number of return addresses recorded for the deepest stack. */
#define STACK_TRACE_ENTRIES 500

/* Saved trace of the deepest stack seen; ULONG_MAX marks unused slots
 * and acts as the end-of-trace sentinel for the seq_file iterator. */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES] =
{ [0 ... (STACK_TRACE_ENTRIES-1)] = ULONG_MAX };
static struct stack_trace max_stack_trace = {
.max_entries = STACK_TRACE_ENTRIES,
.entries = stack_dump_trace,
};

/* Largest stack usage (in bytes) observed so far. */
static unsigned long max_stack_size;
/* Serializes updates/reads of max_stack_size and max_stack_trace.
 * Raw lock: taken from the ftrace callback with IRQs disabled. */
static raw_spinlock_t max_stack_lock =
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/* NOTE(review): checked in stack_trace_call() but never set anywhere in
 * this file — presumably a hook for a future enable/disable knob. */
static int stack_trace_disabled __read_mostly;
/* Per-cpu recursion guard for stack_trace_call(). */
static DEFINE_PER_CPU(int, trace_active);

/*
 * check_stack - record a new maximum stack depth if the current one wins.
 *
 * Estimates the current stack usage from the address of a local variable,
 * and if it exceeds the recorded maximum, saves both the new size and a
 * stack trace of the current call chain under max_stack_lock.
 */
static inline void check_stack(void)
{
unsigned long this_size;
unsigned long flags;

/*
 * Distance from the stack pointer (approximated by &this_size) to the
 * top of the stack. Assumes the thread stack is THREAD_SIZE-aligned so
 * masking yields the offset within it — TODO confirm for all arches
 * this tracer can run on.
 */
this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
this_size = THREAD_SIZE - this_size;

/* Cheap unlocked check first; a race here can only skip an update
 * that the locked re-check below would reject anyway. */
if (this_size <= max_stack_size)
return;

raw_local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);

/* a race could have already updated it */
if (this_size <= max_stack_size)
goto out;

max_stack_size = this_size;

max_stack_trace.nr_entries = 0;
/* skip one frame so check_stack() itself is left out of the trace */
max_stack_trace.skip = 1;

save_stack_trace(&max_stack_trace);

out:
__raw_spin_unlock(&max_stack_lock);
raw_local_irq_restore(flags);
}

/*
 * stack_trace_call - ftrace callback invoked on every traced function entry.
 * @ip:        instruction pointer of the traced function (unused here)
 * @parent_ip: caller's instruction pointer (unused here)
 *
 * Runs check_stack() with preemption disabled, guarded against recursion
 * per-cpu (check_stack() itself calls functions that ftrace may trace).
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
int cpu, resched;

if (unlikely(!ftrace_enabled || stack_trace_disabled))
return;

/* Remember whether a reschedule was already pending so the matching
 * preempt_enable below doesn't trigger one from inside the tracer. */
resched = need_resched();
preempt_disable_notrace();

cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */
if (per_cpu(trace_active, cpu)++ != 0)
goto out;

check_stack();

out:
/* Always decrement: the ++ above ran on both paths. */
per_cpu(trace_active, cpu)--;
/* prevent recursion in schedule */
if (resched)
preempt_enable_no_resched_notrace();
else
preempt_enable_notrace();
}

/* Registration record hooking stack_trace_call() into ftrace
 * (see register_ftrace_function() in stack_trace_init()). */
static struct ftrace_ops trace_ops __read_mostly =
{
.func = stack_trace_call,
};

/*
 * stack_max_size_read - debugfs read for the "stack_max_size" file.
 * @filp:  file whose private_data points at max_stack_size
 * @ubuf:  user buffer to copy the formatted value into
 * @count: size of the user buffer
 * @ppos:  file position, advanced by simple_read_from_buffer()
 *
 * Formats the recorded maximum stack size as a decimal line.
 * Returns the number of bytes copied to user space, or a negative errno.
 */
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	/* %lu: the value is an unsigned long (was %ld, wrong for values
	 * above LONG_MAX). */
	r = snprintf(buf, sizeof(buf), "%lu\n", *ptr);
	/*
	 * snprintf() returns the length it *would* have written.  On
	 * truncation only sizeof(buf) - 1 characters are in buf (plus the
	 * NUL); clamping to sizeof(buf) would have copied the NUL byte to
	 * user space.
	 */
	if (r >= (int)sizeof(buf))
		r = sizeof(buf) - 1;
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

/*
 * stack_max_size_write - debugfs write for the "stack_max_size" file.
 * @filp:  file whose private_data points at max_stack_size
 * @ubuf:  user buffer containing a decimal number
 * @count: number of bytes in @ubuf
 * @ppos:  file position (ignored)
 *
 * Parses a decimal value and stores it as the new recorded maximum,
 * under max_stack_lock so it cannot race with check_stack().
 * Returns @count on success or a negative errno.
 */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	/* private_data is &max_stack_size, an unsigned long (was long *,
	 * mismatching both the target object and 'val' below). */
	unsigned long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	/* Leave room for the terminating NUL added below. */
	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* Same lock/IRQ discipline as check_stack(). */
	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);

	return count;
}

/* File operations for debugfs "stack_max_size" (read/write the record). */
static struct file_operations stack_max_size_fops = {
.open = tracing_open_generic,
.read = stack_max_size_read,
.write = stack_max_size_write,
};

/*
 * t_next - seq_file iterator: advance to the next saved stack entry.
 *
 * m->private walks stack_dump_trace[]; a ULONG_MAX slot marks the end
 * of the recorded trace.  Returns the next entry, or NULL at the end.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *entry = m->private;

	(*pos)++;

	if (!entry || *entry == ULONG_MAX)
		return NULL;

	m->private = ++entry;

	return entry;
}

/*
 * t_start - seq_file start: lock the trace and seek to position *pos.
 *
 * Takes max_stack_lock with IRQs off so check_stack() cannot rewrite
 * the trace while it is being printed; the lock is held across the
 * whole iteration and released in t_stop().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
unsigned long *t = m->private;
loff_t l = 0;

local_irq_disable();
__raw_spin_lock(&max_stack_lock);

/* Replay the iterator up to the requested position. */
for (; t && l < *pos; t = t_next(m, t, &l))
;

return t;
}

/* t_stop - seq_file stop: release the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
__raw_spin_unlock(&max_stack_lock);
local_irq_enable();
}

/*
 * trace_lookup_stack - print one saved return address to the seq_file.
 *
 * With CONFIG_KALLSYMS the address is resolved to a symbol name;
 * otherwise only the raw address is printed.  Returns seq_printf()'s
 * result.
 */
static int trace_lookup_stack(struct seq_file *m, unsigned long addr)
{
#ifdef CONFIG_KALLSYMS
	char sym[KSYM_SYMBOL_LEN];

	sprint_symbol(sym, addr);
	return seq_printf(m, "[<%p>] %s\n", (void*)addr, sym);
#else
	return seq_printf(m, "%p\n", (void*)addr);
#endif
}

/*
 * t_show - seq_file show: print the current entry, skipping the
 * ULONG_MAX end-of-trace sentinel.  Always returns 0.
 */
static int t_show(struct seq_file *m, void *v)
{
	unsigned long *entry = v;

	if (entry && *entry != ULONG_MAX)
		trace_lookup_stack(m, *entry);

	return 0;
}

/* seq_file iterator over stack_dump_trace[] for "stack_trace". */
static struct seq_operations stack_trace_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = t_show,
};

/*
 * stack_trace_open - open handler for the debugfs "stack_trace" file.
 *
 * Sets up the seq_file and points its iterator at the start of the
 * saved trace.  Returns 0 or seq_open()'s error.
 */
static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &stack_trace_seq_ops);

	if (!ret) {
		struct seq_file *seq = file->private_data;

		seq->private = stack_dump_trace;
	}

	return ret;
}

/* File operations for debugfs "stack_trace" (read-only seq_file). */
static struct file_operations stack_trace_fops = {
.open = stack_trace_open,
.read = seq_read,
.llseek = seq_lseek,
};

/*
 * stack_trace_init - create the debugfs files and hook into ftrace.
 *
 * Creation failures are reported but not fatal: the tracer still
 * registers so the maximum keeps being tracked.  Always returns 0.
 */
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer = tracing_init_dentry();
	struct dentry *dentry;

	dentry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				     &max_stack_size, &stack_max_size_fops);
	if (!dentry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	dentry = debugfs_create_file("stack_trace", 0444, d_tracer,
				     NULL, &stack_trace_fops);
	if (!dentry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);

0 comments on commit c06f07b

Please sign in to comment.