Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/core
Ingo Molnar committed Aug 29, 2009
2 parents 0dd7b74 + 5d4a9db commit 73222ac
Showing 3 changed files with 52 additions and 43 deletions.
1 change: 0 additions & 1 deletion kernel/trace/Kconfig
@@ -60,7 +60,6 @@ config EVENT_TRACING
 	bool
 
 config CONTEXT_SWITCH_TRACER
-	select MARKERS
 	bool
 
 # All tracer options should select GENERIC_TRACER. For those options that are
92 changes: 50 additions & 42 deletions kernel/trace/trace.c
@@ -43,9 +43,6 @@
 
 #define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)
 
-unsigned long __read_mostly	tracing_max_latency;
-unsigned long __read_mostly	tracing_thresh;
-
 /*
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
@@ -338,45 +335,6 @@ static struct {
 
 int trace_clock_id;
 
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
- */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
-{
-	struct trace_array_cpu *data = tr->data[cpu];
-
-	max_tr.cpu = cpu;
-	max_tr.time_start = data->preempt_timestamp;
-
-	data = max_tr.data[cpu];
-	data->saved_latency = tracing_max_latency;
-
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
-	data->pid = tsk->pid;
-	data->uid = task_uid(tsk);
-	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
-	data->policy = tsk->policy;
-	data->rt_priority = tsk->rt_priority;
-
-	/* record this tasks comm */
-	tracing_record_cmdline(tsk);
-}
-
 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 {
 	int len;
@@ -420,6 +378,53 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 	return cnt;
 }
 
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly	tracing_max_latency;
+unsigned long __read_mostly	tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+	struct trace_array_cpu *data = tr->data[cpu];
+
+	max_tr.cpu = cpu;
+	max_tr.time_start = data->preempt_timestamp;
+
+	data = max_tr.data[cpu];
+	data->saved_latency = tracing_max_latency;
+
+	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	data->pid = tsk->pid;
+	data->uid = task_uid(tsk);
+	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+	data->policy = tsk->policy;
+	data->rt_priority = tsk->rt_priority;
+
+	/* record this tasks comm */
+	tracing_record_cmdline(tsk);
+}
+
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -476,6 +481,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	__update_max_tr(tr, tsk, cpu);
 	__raw_spin_unlock(&ftrace_max_lock);
 }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 /**
  * register_tracer - register a tracer with the ftrace system.
@@ -3952,11 +3958,13 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("current_tracer", 0644, d_tracer,
 			&global_trace, &set_tracer_fops);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);
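For context, the hunks above only show the tail of update_max_tr_single(); the overall pattern is that the public update_max_tr*() entry points take ftrace_max_lock around the buffer swap and then delegate the bookkeeping to __update_max_tr(). A simplified sketch of that pattern, reconstructed from the surrounding context rather than taken from this diff (the real function carries additional checks):

/* Simplified sketch of the max-latency snapshot guarded by ftrace_max_lock. */
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());		/* callers run with interrupts off */
	__raw_spin_lock(&ftrace_max_lock);

	/* swap the live buffer with the max-latency snapshot buffer */
	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);		/* record pid, comm, saved latency, ... */
	__raw_spin_unlock(&ftrace_max_lock);
}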
2 changes: 2 additions & 0 deletions kernel/trace/trace.h
@@ -473,12 +473,14 @@ void unregister_tracer(struct tracer *type);
 
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
 extern unsigned long tracing_max_latency;
 extern unsigned long tracing_thresh;
 
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
 #ifdef CONFIG_STACKTRACE
 void ftrace_trace_stack(struct trace_array *tr, unsigned long flags,
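With the externs and prototypes now guarded, only code built with CONFIG_TRACER_MAX_TRACE enabled can reference tracing_max_latency, tracing_thresh, or the update_max_tr*() helpers. A hypothetical caller, purely to illustrate the intended usage of this interface; the function below is made up and is not part of this commit:

/* Hypothetical latency-tracer snippet; compiles only when CONFIG_TRACER_MAX_TRACE=y. */
static void record_if_new_max(struct trace_array *tr, unsigned long delta, int cpu)
{
	if (delta <= tracing_max_latency)
		return;

	tracing_max_latency = delta;
	update_max_tr_single(tr, current, cpu);	/* snapshot this CPU's buffer as the new max */
}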
