Skip to content

Commit

Permalink
Port ftrace to markers
Browse files Browse the repository at this point in the history
Porting ftrace to the marker infrastructure.

Don't need to chain to the wakeup tracer from the sched tracer, because markers
support multiple probes connected to a single marker.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  • Loading branch information
Mathieu Desnoyers authored and Thomas Gleixner committed May 23, 2008
1 parent 0aa977f commit 5b82a1b
Show file tree
Hide file tree
Showing 5 changed files with 255 additions and 88 deletions.
32 changes: 0 additions & 32 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -2131,38 +2131,6 @@ __trace_special(void *__tr, void *__data,
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
extern void
ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next);
extern void
ftrace_wake_up_task(void *rq, struct task_struct *wakee,
struct task_struct *curr);
extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
#else
static inline void
ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
{
}
static inline void
sched_trace_special(unsigned long p1, unsigned long p2, unsigned long p3)
{
}
static inline void
ftrace_wake_up_task(void *rq, struct task_struct *wakee,
struct task_struct *curr)
{
}
static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
{
}
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
}
#endif

extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

Expand Down
14 changes: 11 additions & 3 deletions kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -2500,7 +2500,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
success = 1;

out_running:
ftrace_wake_up_task(rq, p, rq->curr);
trace_mark(kernel_sched_wakeup,
"pid %d state %ld ## rq %p task %p rq->curr %p",
p->pid, p->state, rq, p, rq->curr);
check_preempt_curr(rq, p);

p->state = TASK_RUNNING;
Expand Down Expand Up @@ -2631,7 +2633,9 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
p->sched_class->task_new(rq, p);
inc_nr_running(rq);
}
ftrace_wake_up_task(rq, p, rq->curr);
trace_mark(kernel_sched_wakeup_new,
"pid %d state %ld ## rq %p task %p rq->curr %p",
p->pid, p->state, rq, p, rq->curr);
check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
Expand Down Expand Up @@ -2804,7 +2808,11 @@ context_switch(struct rq *rq, struct task_struct *prev,
struct mm_struct *mm, *oldmm;

prepare_task_switch(rq, prev, next);
ftrace_ctx_switch(rq, prev, next);
trace_mark(kernel_sched_schedule,
"prev_pid %d next_pid %d prev_state %ld "
"## rq %p prev %p next %p",
prev->pid, next->pid, prev->state,
rq, prev, next);
mm = next->mm;
oldmm = prev->active_mm;
/*
Expand Down
20 changes: 1 addition & 19 deletions kernel/trace/trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -234,25 +234,10 @@ void update_max_tr_single(struct trace_array *tr,

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_SCHED_TRACER
extern void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
extern void
wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
static inline void
wakeup_sched_wakeup(struct task_struct *wakee, struct task_struct *curr)
{
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
void *__rq,
struct task_struct *prev,
struct task_struct *next);

Expand All @@ -262,9 +247,6 @@ struct tracer_switch_ops {
struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
Expand Down
171 changes: 143 additions & 28 deletions kernel/trace/trace_sched_switch.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,14 @@

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static atomic_t sched_ref;

static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
struct task_struct *next)
{
struct trace_array *tr = ctx_trace;
struct trace_array **ptr = private;
struct trace_array *tr = *ptr;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
Expand All @@ -41,10 +44,40 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
local_irq_restore(flags);
}

/*
 * Marker probe for the "kernel_sched_schedule" marker.
 *
 * Invoked from context_switch() via the marker infrastructure with the
 * format "prev_pid %d next_pid %d prev_state %ld ## rq %p prev %p next %p".
 * The va_arg sequence below MUST consume the arguments in exactly that
 * order and with exactly those promoted types, or the pointers extracted
 * afterwards are garbage.
 *
 * probe_data is the &ctx_trace pointer passed at marker_probe_register()
 * time; call_data and format are supplied by the marker core and unused
 * here beyond documentation.
 *
 * notrace: this runs on every context switch and must not itself be
 * traced, or it would recurse into the tracer.
 */
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
const char *format, va_list *args)
{
struct task_struct *prev;
struct task_struct *next;
struct rq *__rq;

/* Bail out cheaply when no tracer has enabled sched tracing. */
if (!atomic_read(&sched_ref))
return;

/* skip prev_pid %d next_pid %d prev_state %ld */
(void)va_arg(*args, int);
(void)va_arg(*args, int);
(void)va_arg(*args, long);
/* Now the "## rq %p prev %p next %p" payload. */
__rq = va_arg(*args, typeof(__rq));
prev = va_arg(*args, typeof(prev));
next = va_arg(*args, typeof(next));

/* Record prev's comm so PIDs can be resolved in the trace output. */
tracing_record_cmdline(prev);

/*
 * If tracer_switch_func only points to the local
 * switch func, it still needs the ptr passed to it.
 */
sched_switch_func(probe_data, __rq, prev, next);
}

static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
task_struct *curr)
{
struct trace_array *tr = ctx_trace;
struct trace_array **ptr = private;
struct trace_array *tr = *ptr;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
Expand All @@ -67,35 +100,29 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
local_irq_restore(flags);
}

void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
struct task_struct *next)
static notrace void
wake_up_callback(void *probe_data, void *call_data,
const char *format, va_list *args)
{
if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
tracing_record_cmdline(prev);
struct task_struct *curr;
struct task_struct *task;
struct rq *__rq;

/*
* If tracer_switch_func only points to the local
* switch func, it still needs the ptr passed to it.
*/
ctx_switch_func(__rq, prev, next);
if (likely(!tracer_enabled))
return;

/*
* Chain to the wakeup tracer (this is a NOP if disabled):
*/
wakeup_sched_switch(prev, next);
}
/* Skip pid %d state %ld */
(void)va_arg(*args, int);
(void)va_arg(*args, long);
/* now get the meat: "rq %p task %p rq->curr %p" */
__rq = va_arg(*args, typeof(__rq));
task = va_arg(*args, typeof(task));
curr = va_arg(*args, typeof(curr));

void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
struct task_struct *curr)
{
wakeup_func(__rq, wakee, curr);
tracing_record_cmdline(task);
tracing_record_cmdline(curr);

/*
* Chain to the wakeup tracer (this is a NOP if disabled):
*/
wakeup_sched_wakeup(wakee, curr);
wakeup_func(probe_data, __rq, task, curr);
}

void
Expand Down Expand Up @@ -132,15 +159,95 @@ static void sched_switch_reset(struct trace_array *tr)
tracing_reset(tr->data[cpu]);
}

/*
 * Register the three scheduler marker probes:
 * kernel_sched_wakeup, kernel_sched_wakeup_new and kernel_sched_schedule.
 *
 * The format string passed here must match the one used at the
 * trace_mark() site byte for byte, or the marker core rejects the probe.
 * &ctx_trace is handed through as probe private data so the callbacks
 * can reach the trace_array.
 *
 * On failure, the probes registered so far are unwound in reverse order
 * via the goto ladder below; keep the unwind labels in sync with the
 * registration sequence.
 *
 * Returns 0 on success or the first failing marker_probe_register()
 * error code.
 */
static int tracing_sched_register(void)
{
int ret;

ret = marker_probe_register("kernel_sched_wakeup",
"pid %d state %ld ## rq %p task %p rq->curr %p",
wake_up_callback,
&ctx_trace);
if (ret) {
pr_info("wakeup trace: Couldn't add marker"
" probe to kernel_sched_wakeup\n");
return ret;
}

ret = marker_probe_register("kernel_sched_wakeup_new",
"pid %d state %ld ## rq %p task %p rq->curr %p",
wake_up_callback,
&ctx_trace);
if (ret) {
pr_info("wakeup trace: Couldn't add marker"
" probe to kernel_sched_wakeup_new\n");
goto fail_deprobe;
}

ret = marker_probe_register("kernel_sched_schedule",
"prev_pid %d next_pid %d prev_state %ld "
"## rq %p prev %p next %p",
sched_switch_callback,
&ctx_trace);
if (ret) {
pr_info("sched trace: Couldn't add marker"
" probe to kernel_sched_schedule\n");
goto fail_deprobe_wake_new;
}

return ret;
/* Unwind in reverse registration order. */
fail_deprobe_wake_new:
marker_probe_unregister("kernel_sched_wakeup_new",
wake_up_callback,
&ctx_trace);
fail_deprobe:
marker_probe_unregister("kernel_sched_wakeup",
wake_up_callback,
&ctx_trace);
return ret;
}

/*
 * Tear down all three scheduler marker probes, in the reverse order of
 * tracing_sched_register(). The probe function and private-data pointer
 * must match the ones used at registration time for the marker core to
 * find and remove the right probe.
 */
static void tracing_sched_unregister(void)
{
marker_probe_unregister("kernel_sched_schedule",
sched_switch_callback,
&ctx_trace);
marker_probe_unregister("kernel_sched_wakeup_new",
wake_up_callback,
&ctx_trace);
marker_probe_unregister("kernel_sched_wakeup",
wake_up_callback,
&ctx_trace);
}

/*
 * Take a reference on the sched-switch marker probes.
 *
 * Reference counted: only the 0 -> 1 transition actually registers the
 * marker probes; subsequent callers merely bump sched_ref.
 */
void tracing_start_sched_switch(void)
{
	if (atomic_inc_return(&sched_ref) == 1)
		tracing_sched_register();
}

/*
 * Drop a reference on the sched-switch marker probes.
 *
 * atomic_dec_and_test() returns true only when the count hits zero,
 * i.e. when the last user goes away, at which point the marker probes
 * are unregistered.
 */
void tracing_stop_sched_switch(void)
{
	if (atomic_dec_and_test(&sched_ref))
		tracing_sched_unregister();
}

/*
 * Tracer start hook: reset the per-cpu buffers, enable cmdline
 * recording and the local enabled flag, then take a reference on the
 * marker probes. Ordering matters: the flags are raised before the
 * probes can start firing into the buffers.
 */
static void start_sched_trace(struct trace_array *tr)
{
sched_switch_reset(tr);
atomic_inc(&trace_record_cmdline_enabled);
tracer_enabled = 1;
tracing_start_sched_switch();
}

/*
 * Tracer stop hook: drop our reference on the marker probes and clear
 * the cmdline-recording and enabled flags.
 *
 * NOTE(review): this is not the exact reverse of start_sched_trace() --
 * tracer_enabled is cleared after the probes are released rather than
 * before; probes may still fire briefly with tracer_enabled set.
 */
static void stop_sched_trace(struct trace_array *tr)
{
tracing_stop_sched_switch();
atomic_dec(&trace_record_cmdline_enabled);
tracer_enabled = 0;
}
Expand Down Expand Up @@ -181,6 +288,14 @@ static struct tracer sched_switch_trace __read_mostly =

/*
 * Boot-time init: if someone already took a sched_ref before this
 * initcall ran (e.g. another tracer started early), the marker probes
 * were not registered yet -- do it now. Then register the sched_switch
 * tracer itself with the tracing core.
 *
 * Returns 0 on success or a negative error code.
 */
__init static int init_sched_switch_trace(void)
{
int ret = 0;

if (atomic_read(&sched_ref))
ret = tracing_sched_register();
if (ret) {
pr_info("error registering scheduler trace\n");
return ret;
}
return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
Loading

0 comments on commit 5b82a1b

Please sign in to comment.