Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 350044
b: refs/heads/master
c: debdd57
h: refs/heads/master
v: v3
  • Loading branch information
Hiraku Toyooka authored and Steven Rostedt committed Jan 30, 2013
1 parent 46ce21e commit 17f7b11
Show file tree
Hide file tree
Showing 5 changed files with 155 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 2fd196ec1eab2623096e7fc7e6f3976160392bce
refs/heads/master: debdd57f5145f3c6a4b3f8d0126abd1a2def7fc6
3 changes: 3 additions & 0 deletions trunk/include/linux/ftrace_event.h
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,9 @@ struct trace_iterator {
long idx;

cpumask_var_t started;

/* it's true when current open file is snapshot */
bool snapshot;
};

enum trace_iter_flags {
Expand Down
10 changes: 10 additions & 0 deletions trunk/kernel/trace/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,16 @@ config FTRACE_SYSCALLS
help
Basic tracer to catch the syscall entry and exit events.

# Debugfs-controlled snapshot: allocates a spare (max) buffer that the live
# trace buffer can be swapped into on demand via the "snapshot" file.
config TRACER_SNAPSHOT
	bool "Create a snapshot trace buffer"
	select TRACER_MAX_TRACE
	help
	  Allow tracing users to take snapshot of the current buffer using the
	  ftrace interface, e.g.:

	    echo 1 > /sys/kernel/debug/tracing/snapshot
	    cat snapshot

config TRACE_BRANCH_PROFILING
bool
select GENERIC_TRACER
Expand Down
166 changes: 140 additions & 26 deletions trunk/kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)

WARN_ON_ONCE(!irqs_disabled());

/* If we disabled the tracer, stop now */
if (current_trace == &nop_trace)
return;

if (WARN_ON_ONCE(!current_trace->use_max_tr))
if (!current_trace->allocated_snapshot) {
/* Only the nop tracer should hit this when disabling */
WARN_ON_ONCE(current_trace != &nop_trace);
return;
}

arch_spin_lock(&ftrace_max_lock);

Expand Down Expand Up @@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;

WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) {
WARN_ON_ONCE(1);
if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
return;
}

arch_spin_lock(&ftrace_max_lock);

Expand Down Expand Up @@ -866,10 +863,13 @@ int register_tracer(struct tracer *type)

current_trace = type;

/* If we expanded the buffers, make sure the max is expanded too */
if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
if (type->use_max_tr) {
/* If we expanded the buffers, make sure the max is expanded too */
if (ring_buffer_expanded)
ring_buffer_resize(max_tr.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
type->allocated_snapshot = true;
}

/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
Expand All @@ -885,10 +885,14 @@ int register_tracer(struct tracer *type)
/* Only reset on passing, to avoid touching corrupted buffers */
tracing_reset_online_cpus(tr);

/* Shrink the max buffer again */
if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, 1,
RING_BUFFER_ALL_CPUS);
if (type->use_max_tr) {
type->allocated_snapshot = false;

/* Shrink the max buffer again */
if (ring_buffer_expanded)
ring_buffer_resize(max_tr.buffer, 1,
RING_BUFFER_ALL_CPUS);
}

printk(KERN_CONT "PASSED\n");
}
Expand Down Expand Up @@ -1964,7 +1968,11 @@ static void *s_start(struct seq_file *m, loff_t *pos)
*iter->trace = *current_trace;
mutex_unlock(&trace_types_lock);

atomic_inc(&trace_record_cmdline_disabled);
if (iter->snapshot && iter->trace->use_max_tr)
return ERR_PTR(-EBUSY);

if (!iter->snapshot)
atomic_inc(&trace_record_cmdline_disabled);

if (*pos != iter->pos) {
iter->ent = NULL;
Expand Down Expand Up @@ -2003,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p)
{
struct trace_iterator *iter = m->private;

atomic_dec(&trace_record_cmdline_disabled);
if (iter->snapshot && iter->trace->use_max_tr)
return;

if (!iter->snapshot)
atomic_dec(&trace_record_cmdline_disabled);
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
Expand Down Expand Up @@ -2438,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = {
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
long cpu_file = (long) inode->i_private;
struct trace_iterator *iter;
Expand Down Expand Up @@ -2471,10 +2483,11 @@ __tracing_open(struct inode *inode, struct file *file)
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;

if (current_trace && current_trace->print_max)
if ((current_trace && current_trace->print_max) || snapshot)
iter->tr = &max_tr;
else
iter->tr = &global_trace;
iter->snapshot = snapshot;
iter->pos = -1;
mutex_init(&iter->mutex);
iter->cpu_file = cpu_file;
Expand All @@ -2491,8 +2504,9 @@ __tracing_open(struct inode *inode, struct file *file)
if (trace_clocks[trace_clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

/* stop the trace while dumping */
tracing_stop();
/* stop the trace while dumping if we are not opening "snapshot" */
if (!iter->snapshot)
tracing_stop();

if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu) {
Expand Down Expand Up @@ -2555,8 +2569,9 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close)
iter->trace->close(iter);

/* reenable tracing if it was previously enabled */
tracing_start();
if (!iter->snapshot)
/* reenable tracing if it was previously enabled */
tracing_start();
mutex_unlock(&trace_types_lock);

mutex_destroy(&iter->mutex);
Expand Down Expand Up @@ -2584,7 +2599,7 @@ static int tracing_open(struct inode *inode, struct file *file)
}

if (file->f_mode & FMODE_READ) {
iter = __tracing_open(inode, file);
iter = __tracing_open(inode, file, false);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT)
Expand Down Expand Up @@ -3219,7 +3234,7 @@ static int tracing_set_tracer(const char *buf)
if (current_trace && current_trace->reset)
current_trace->reset(tr);

had_max_tr = current_trace && current_trace->use_max_tr;
had_max_tr = current_trace && current_trace->allocated_snapshot;
current_trace = &nop_trace;

if (had_max_tr && !t->use_max_tr) {
Expand All @@ -3238,6 +3253,8 @@ static int tracing_set_tracer(const char *buf)
*/
ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&max_tr, 1);
tracing_reset_online_cpus(&max_tr);
current_trace->allocated_snapshot = false;
}
destroy_trace_option_files(topts);

Expand All @@ -3248,6 +3265,7 @@ static int tracing_set_tracer(const char *buf)
RING_BUFFER_ALL_CPUS);
if (ret < 0)
goto out;
t->allocated_snapshot = true;
}

if (t->init) {
Expand Down Expand Up @@ -4066,6 +4084,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
return single_open(file, tracing_clock_show, NULL);
}

#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the "snapshot" debugfs file.
 *
 * A write-only open needs no iterator state, so only readable opens set
 * up a trace iterator (over max_tr, via the snapshot=true flag).
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	iter = __tracing_open(inode, file, true);

	return IS_ERR(iter) ? PTR_ERR(iter) : 0;
}

/*
 * Write handler for the "snapshot" debugfs file.
 *
 * The value written selects the action:
 *   0    - free the spare (max_tr) buffer, if one was allocated
 *   1    - allocate the spare buffer if needed, then swap it with the
 *          live buffer (i.e. take a snapshot)
 *   else - clear the contents of an existing snapshot buffer
 *
 * Returns the number of bytes consumed on success, negative errno on
 * failure.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	unsigned long val;
	int ret;

	/* Make sure the live ring buffer is fully allocated first. */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/* Parse the user's decimal value. */
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* Serializes against tracer changes and other snapshot ops. */
	mutex_lock(&trace_types_lock);

	/*
	 * Tracers with use_max_tr (the latency tracers) own the spare
	 * buffer themselves; manual snapshot control would conflict.
	 */
	if (current_trace && current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (current_trace->allocated_snapshot) {
			/* free spare buffer */
			ring_buffer_resize(max_tr.buffer, 1,
					RING_BUFFER_ALL_CPUS);
			set_buffer_entries(&max_tr, 1);
			tracing_reset_online_cpus(&max_tr);
			current_trace->allocated_snapshot = false;
		}
		break;
	case 1:
		if (!current_trace->allocated_snapshot) {
			/* allocate spare buffer */
			ret = resize_buffer_duplicate_size(&max_tr,
					&global_trace, RING_BUFFER_ALL_CPUS);
			if (ret < 0)
				break;
			current_trace->allocated_snapshot = true;
		}

		/* update_max_tr() requires irqs disabled by its caller */
		local_irq_disable();
		/* Now, we're going to swap */
		update_max_tr(&global_trace, current, smp_processor_id());
		local_irq_enable();
		break;
	default:
		/* Any other value just clears the snapshot contents. */
		if (current_trace->allocated_snapshot)
			tracing_reset_online_cpus(&max_tr);
		else
			ret = -EINVAL;
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
Expand Down Expand Up @@ -4122,6 +4221,16 @@ static const struct file_operations trace_clock_fops = {
.write = tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
/* File operations for the tracing "snapshot" debugfs file. */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.write		= tracing_snapshot_write,
	.read		= seq_read,
	.llseek		= tracing_seek,
	.release	= tracing_release,
};
#endif /* CONFIG_TRACER_SNAPSHOT */

struct ftrace_buffer_info {
struct trace_array *tr;
void *spare;
Expand Down Expand Up @@ -4921,6 +5030,11 @@ static __init int tracer_init_debugfs(void)
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
trace_create_file("snapshot", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
#endif

create_trace_options_dir();

for_each_tracing_cpu(cpu)
Expand Down
1 change: 1 addition & 0 deletions trunk/kernel/trace/trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -287,6 +287,7 @@ struct tracer {
struct tracer_flags *flags;
bool print_max;
bool use_max_tr;
bool allocated_snapshot;
};


Expand Down

0 comments on commit 17f7b11

Please sign in to comment.