
Commit

---
r: 339006
b: refs/heads/master
c: 0d5c6e1
h: refs/heads/master
v: v3
Steven Rostedt authored and Steven Rostedt committed Nov 2, 2012
1 parent 30f2a1d commit 4eb8d76
Showing 10 changed files with 85 additions and 74 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 02404baf1b47123f1c88c9f9f1f3b00e1e2b10db
+refs/heads/master: 0d5c6e1c19bab82fad4837108c2902f557d62a04
14 changes: 7 additions & 7 deletions trunk/include/linux/ftrace_event.h
@@ -127,13 +127,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event,
                                         unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-                                       struct ring_buffer_event *event,
-                                       unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-                                            struct ring_buffer_event *event,
-                                            unsigned long flags, int pc,
-                                            struct pt_regs *regs);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+                                struct ring_buffer_event *event,
+                                unsigned long flags, int pc);
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+                                     struct ring_buffer_event *event,
+                                     unsigned long flags, int pc,
+                                     struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                          struct ring_buffer_event *event);
 
3 changes: 1 addition & 2 deletions trunk/include/trace/ftrace.h
@@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto)                    \
                { assign; }                                             \
                                                                        \
        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
-               trace_nowake_buffer_unlock_commit(buffer,               \
-                                                 event, irq_flags, pc); \
+               trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
1 change: 1 addition & 0 deletions trunk/kernel/trace/Kconfig
@@ -119,6 +119,7 @@ config TRACING
        select BINARY_PRINTF
        select EVENT_TRACING
        select TRACE_CLOCK
+       select IRQ_WORK
 
 config GENERIC_TRACER
        bool
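Context for this one-liner: the rewritten wake-up path in trace.c below calls init_irq_work() and irq_work_queue(), so any configuration that enables TRACING now also needs CONFIG_IRQ_WORK; the select makes that automatic. For readers unfamiliar with the API, a minimal hypothetical module (my_work, my_handler, and my_demo_init are placeholder names, not from this commit) might look like:

#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/module.h>
#include <linux/printk.h>

static struct irq_work my_work;

/* The handler runs in hard interrupt context shortly after queueing. */
static void my_handler(struct irq_work *work)
{
        pr_info("irq_work handler ran\n");
}

static int __init my_demo_init(void)
{
        init_irq_work(&my_work, my_handler);
        irq_work_queue(&my_work);       /* safe to call even from NMI context */
        return 0;
}
module_init(my_demo_init);

MODULE_LICENSE("GPL");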
121 changes: 68 additions & 53 deletions trunk/kernel/trace/trace.c
@@ -19,6 +19,7 @@
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -84,6 +85,14 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static DEFINE_PER_CPU(bool, trace_cmdline_save);
 
+/*
+ * When a reader is waiting for data, this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
 /*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
@@ -329,12 +338,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Runs from irq_work to wake up any task that is blocked on the
+ * trace_wait queue. This is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-       wake_up(&trace_wait);
-}
+       wake_up_all(&trace_wait);
 
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
 
 /**
  * tracing_on - enable tracing buffers
@@ -389,22 +404,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-       const unsigned long delay = msecs_to_jiffies(2);
-
-       if (trace_flags & TRACE_ITER_BLOCK)
-               return;
-       schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
        unsigned long buf_size;
@@ -753,6 +752,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+       DEFINE_WAIT(wait);
+
+       prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+       /*
+        * The events can happen in critical sections where
+        * checking a work queue can cause deadlocks.
+        * After adding a task to the queue, this flag is set
+        * only to notify events to try to wake up the queue
+        * using irq_work.
+        *
+        * We don't clear it even if the buffer is no longer
+        * empty. The flag only causes the next event to run
+        * irq_work to do the work queue wake up. The worst
+        * that can happen if we race with !trace_empty() is that
+        * an event will cause an irq_work to try to wake up
+        * an empty queue.
+        *
+        * There's no reason to protect this flag either, as
+        * the work queue and irq_work logic will do the necessary
+        * synchronization for the wake ups. The only thing
+        * that is necessary is that the wake up happens after
+        * a task has been queued. Spurious wake ups are OK.
+        */
+       trace_wakeup_needed = true;
+
+       if (trace_empty(iter))
+               schedule();
+
+       finish_wait(&trace_wait, &wait);
+}
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
@@ -1156,30 +1189,32 @@ void
 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
 {
        __this_cpu_write(trace_cmdline_save, true);
+       if (trace_wakeup_needed) {
+               trace_wakeup_needed = false;
+               /* irq_work_queue() supplies its own memory barriers */
+               irq_work_queue(&trace_work_wakeup);
+       }
        ring_buffer_unlock_commit(buffer, event);
 }
 
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
                             struct ring_buffer_event *event,
-                            unsigned long flags, int pc,
-                            int wake)
+                            unsigned long flags, int pc)
 {
        __buffer_unlock_commit(buffer, event);
 
        ftrace_trace_stack(buffer, flags, 6, pc);
        ftrace_trace_userstack(buffer, flags, pc);
-
-       if (wake)
-               trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc)
 {
-       __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+       __trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1196,29 +1231,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event,
                                         unsigned long flags, int pc)
 {
-       __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+       __trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-                                      struct ring_buffer_event *event,
-                                      unsigned long flags, int pc)
-{
-       __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-                                           struct ring_buffer_event *event,
-                                           unsigned long flags, int pc,
-                                           struct pt_regs *regs)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+                                    struct ring_buffer_event *event,
+                                    unsigned long flags, int pc,
+                                    struct pt_regs *regs)
 {
        __buffer_unlock_commit(buffer, event);
 
        ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event)
@@ -3354,19 +3381,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
        }
 }
 
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
-       DEFINE_WAIT(wait);
-
-       prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-       if (trace_empty(iter))
-               schedule();
-
-       finish_wait(&trace_wait, &wait);
-}
-
 /*
  * This is a make-shift waitqueue.
  * A tracer might use this callback on some rare cases:
@@ -5107,6 +5121,7 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
        trace_init_cmdlines();
+       init_irq_work(&trace_work_wakeup, trace_wake_up);
 
        register_tracer(&nop_trace);
        current_trace = &nop_trace;
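Taken together, the trace.c hunks replace the old delayed-work wake-up with a two-sided handshake: a reader about to sleep sets trace_wakeup_needed, and the next event commit, which may run in NMI or another atomic context where a direct wake_up() could deadlock, only queues an irq_work; the irq_work handler then performs the real wake_up_all(). The following condensed sketch shows the same pattern outside the tracing code, assuming a kernel-module context; the demo_* identifiers and the empty() callback are illustrative, not part of the commit:

#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static bool demo_wakeup_needed;
static struct irq_work demo_wakeup_work;

/* Runs in IRQ context, where taking the waitqueue lock is safe. */
static void demo_wake_up(struct irq_work *work)
{
        wake_up_all(&demo_wait);
}

/* Reader side, mirroring default_wait_pipe(). */
static void demo_wait_for_data(bool (*empty)(void))
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&demo_wait, &wait, TASK_INTERRUPTIBLE);
        /* Announce a waiter; a race costs at most a spurious wake up. */
        demo_wakeup_needed = true;
        if (empty())
                schedule();
        finish_wait(&demo_wait, &wait);
}

/* Producer side, mirroring __buffer_unlock_commit(). */
static void demo_commit_event(void)
{
        /* ... write the event into the buffer ... */
        if (demo_wakeup_needed) {
                demo_wakeup_needed = false;
                /* Defer the wake up; never call wake_up() from here. */
                irq_work_queue(&demo_wakeup_work);
        }
}

/* One-time setup, mirroring tracer_alloc_buffers(). */
static void demo_init(void)
{
        init_irq_work(&demo_wakeup_work, demo_wake_up);
}

The design choice that makes this work: irq_work_queue() essentially sets a per-CPU flag and raises a self-interrupt, so it is usable from NMI and ring-buffer critical sections, while the waitqueue locking is deferred to the handler.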
5 changes: 0 additions & 5 deletions trunk/kernel/trace/trace.h
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
 void tracing_reset_online_cpus(struct trace_array *tr);
 void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          unsigned long len,
                          unsigned long flags,
                          int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
-                               struct ring_buffer_event *event,
-                               unsigned long flags, int pc);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
                                            struct trace_array_cpu *data);
@@ -370,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
 void ftrace(struct trace_array *tr,
2 changes: 1 addition & 1 deletion trunk/kernel/trace/trace_events.c
@@ -1760,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
        entry->ip = ip;
        entry->parent_ip = parent_ip;
 
-       trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+       trace_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
8 changes: 4 additions & 4 deletions trunk/kernel/trace/trace_kprobe.c
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
        if (!filter_current_check_discard(buffer, call, entry, event))
-               trace_nowake_buffer_unlock_commit_regs(buffer, event,
-                                                      irq_flags, pc, regs);
+               trace_buffer_unlock_commit_regs(buffer, event,
+                                               irq_flags, pc, regs);
 }
 
 /* Kretprobe handler */
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
        if (!filter_current_check_discard(buffer, call, entry, event))
-               trace_nowake_buffer_unlock_commit_regs(buffer, event,
-                                                      irq_flags, pc, regs);
+               trace_buffer_unlock_commit_regs(buffer, event,
+                                               irq_flags, pc, regs);
 }
 
 /* Event entry printers */
2 changes: 1 addition & 1 deletion trunk/kernel/trace/trace_sched_switch.c
@@ -102,7 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->next_cpu = task_cpu(wakee);
 
        if (!filter_check_discard(call, entry, buffer, event))
-               trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+               trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
 static void
1 change: 1 addition & 0 deletions trunk/kernel/trace/trace_selftest.c
@@ -1094,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
+       printk("ret = %d\n", ret);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
 
