Skip to content

Commit

Permalink
ftrace: add function tracing to wake up tracing
Browse files Browse the repository at this point in the history
This patch adds function tracing to the functions that are called
on the CPU of the task being traced.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: pq@iki.fi
Cc: proski@gnu.org
Cc: sandmann@redhat.com
Cc: a.p.zijlstra@chello.nl
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  • Loading branch information
Steven Rostedt authored and Thomas Gleixner committed May 26, 2008
1 parent 4902f88 commit 7e18d8e
Showing 1 changed file with 66 additions and 1 deletion.
67 changes: 66 additions & 1 deletion kernel/trace/trace_sched_wakeup.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,69 @@ static DEFINE_SPINLOCK(wakeup_lock);

static void __wakeup_reset(struct trace_array *tr);

#ifdef CONFIG_FTRACE
/*
 * The wakeup tracer uses its own tracer function to keep the overhead down:
 * it records a function entry only while a wakeup-latency measurement is in
 * progress, and only on the CPU that the woken task is running on.
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;

	/* Fast path: no wakeup trace is in progress. */
	if (likely(!wakeup_task))
		return;

	/*
	 * Remember whether a reschedule was already pending before we
	 * disabled preemption; see the matching comment at the bottom.
	 */
	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/*
	 * Per-cpu recursion guard: only the first (outermost) entry on
	 * this CPU may record; nested calls bail out immediately.
	 */
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	spin_lock_irqsave(&wakeup_lock, flags);

	/* Re-check under the lock: the trace may have just completed. */
	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, data, ip, parent_ip, flags);

unlock:
	spin_unlock_irqrestore(&wakeup_lock, flags);

out:
	atomic_dec(&data->disabled);

	/*
	 * To prevent recursion from the scheduler, if the
	 * resched flag was set before we entered, then
	 * don't reschedule.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

/* Ops registered with ftrace while the wakeup tracer is active. */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
* Should this new latency be reported/recorded?
*/
Expand Down Expand Up @@ -73,7 +136,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
if (next != wakeup_task)
return;

/* The task we are waitng for is waking up */
/* The task we are waiting for is waking up */
data = tr->data[wakeup_cpu];

/* disable local data, not wakeup_cpu data */
Expand Down Expand Up @@ -290,6 +353,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
smp_wmb();

tracer_enabled = 1;
register_ftrace_function(&trace_ops);

return;
fail_deprobe_wake_new:
Expand All @@ -305,6 +369,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
unregister_ftrace_function(&trace_ops);
marker_probe_unregister("kernel_sched_schedule",
sched_switch_callback,
&wakeup_trace);
Expand Down

0 comments on commit 7e18d8e

Please sign in to comment.