Merge tag 'trace-v4.2-rc2-fix3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull ftrace fix from Steven Rostedt:
 "Back in 3.16 the ftrace code was redesigned and cleaned up to remove
  the double iteration list (one for registered ftrace ops, and one for
  registered "global" ops), to just use one list.  That simplified the
  code but also broke the function tracing filtering on pid.

  This updates the code to handle the filtering again with the new
  logic"

* tag 'trace-v4.2-rc2-fix3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Fix breakage of set_ftrace_pid
Linus Torvalds committed Jul 25, 2015
2 parents 4520083 + e3eea14 commit 763e326
Showing 2 changed files with 37 additions and 18 deletions.
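In essence, the fix replaces the old single global ftrace_pid_function hook with per-ops state: every ftrace_ops keeps its real callback in a new saved_func field, and any ops flagged FTRACE_OPS_FL_PID has its ->func swapped to ftrace_pid_func while pid filtering is active. The following stand-alone user-space sketch models that swap for orientation; the struct, the flag value, and the single-ops simplification are illustrative paraphrases of the kernel code, not the kernel code itself.

/*
 * Minimal user-space model of the saved_func/PID-flag mechanism.
 * Each ops stashes its real callback in saved_func; when pid
 * filtering is on, PID-flagged ops route through a wrapper that
 * checks the current task before calling the saved callback.
 */
#include <stdbool.h>
#include <stdio.h>

#define OPS_FL_PID (1 << 0)             /* models FTRACE_OPS_FL_PID */

struct ops {
        void (*func)(const char *event, struct ops *op);
        void (*saved_func)(const char *event, struct ops *op);
        unsigned long flags;
};

static bool pids_enabled;               /* models ftrace_pids_enabled() */
static bool current_task_traced;        /* models test_tsk_trace_trace(current) */

/* models ftrace_pid_func(): filter, then call the saved callback */
static void pid_func(const char *event, struct ops *op)
{
        if (!current_task_traced)
                return;
        op->saved_func(event, op);
}

/* models __register_ftrace_function(): always save, swap if filtering */
static void register_ops(struct ops *op)
{
        op->saved_func = op->func;
        if ((op->flags & OPS_FL_PID) && pids_enabled)
                op->func = pid_func;
}

/* models ftrace_update_pid_func(): re-swap whenever the pid list changes */
static void update_pid_func(struct ops *op)
{
        if (op->flags & OPS_FL_PID)
                op->func = pids_enabled ? pid_func : op->saved_func;
}

static void my_tracer(const char *event, struct ops *op)
{
        printf("traced: %s\n", event);
}

int main(void)
{
        struct ops op = { .func = my_tracer, .flags = OPS_FL_PID };

        register_ops(&op);
        op.func("no filtering yet", &op);       /* printed */

        pids_enabled = true;                    /* pid written to set_ftrace_pid */
        update_pid_func(&op);
        op.func("wrong pid", &op);              /* suppressed by pid_func */

        current_task_traced = true;
        op.func("matching pid", &op);           /* printed */
        return 0;
}

The property the sketch mirrors is that the pid wrapper always calls op->saved_func rather than one shared global, so each registered ops keeps its own callback even when several are registered at once; unregistering restores ->func from saved_func so a later re-register starts from the real callback.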
3 changes: 3 additions & 0 deletions include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            SAVE_REGS. If another ops with this flag set is already registered
  *            for any of the functions that this ops will be registered for, then
  *            this ops will fail to register or set_filter_ip.
+ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  */
 enum {
         FTRACE_OPS_FL_ENABLED           = 1 << 0,
@@ -132,6 +133,7 @@ enum {
         FTRACE_OPS_FL_MODIFYING         = 1 << 11,
         FTRACE_OPS_FL_ALLOC_TRAMP       = 1 << 12,
         FTRACE_OPS_FL_IPMODIFY          = 1 << 13,
+        FTRACE_OPS_FL_PID               = 1 << 14,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
         struct ftrace_ops               *next;
         unsigned long                   flags;
         void                            *private;
+        ftrace_func_t                   saved_func;
         int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
         int                             nr_trampolines;
52 changes: 34 additions & 18 deletions kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
         struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+        return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
         if (!test_tsk_trace_trace(current))
                 return;
 
-        ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-        /* do not set ftrace_pid_function to itself! */
-        if (func != ftrace_pid_func)
-                ftrace_pid_function = func;
+        op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
         ftrace_trace_function = ftrace_stub;
-        ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
         } else
                 add_ftrace_ops(&ftrace_ops_list, ops);
 
+        /* Always save the function, and reset at unregistering */
+        ops->saved_func = ops->func;
+
+        if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+                ops->func = ftrace_pid_func;
+
         ftrace_update_trampoline(ops);
 
         if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
         if (ftrace_enabled)
                 update_ftrace_function();
 
+        ops->func = ops->saved_func;
+
         return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+        bool enabled = ftrace_pids_enabled();
+        struct ftrace_ops *op;
+
         /* Only do something if we are tracing something */
         if (ftrace_trace_function == ftrace_stub)
                 return;
 
+        do_for_each_ftrace_op(op, ftrace_ops_list) {
+                if (op->flags & FTRACE_OPS_FL_PID) {
+                        op->func = enabled ? ftrace_pid_func :
+                                op->saved_func;
+                        ftrace_update_trampoline(op);
+                }
+        } while_for_each_ftrace_op(op);
+
         update_ftrace_function();
 }
 
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
         .local_hash.filter_hash         = EMPTY_HASH,
         INIT_OPS_HASH(global_ops)
         .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
-                                          FTRACE_OPS_FL_INITIALIZED,
+                                          FTRACE_OPS_FL_INITIALIZED |
+                                          FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
         .func                   = ftrace_stub,
-        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                  FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
                 if (WARN_ON(tr->ops->func != ftrace_stub))
                         printk("ftrace ops had %pS for function\n",
                                tr->ops->func);
-                /* Only the top level instance does pid tracing */
-                if (!list_empty(&ftrace_pids)) {
-                        set_ftrace_pid_function(func);
-                        func = ftrace_pid_func;
-                }
         }
         tr->ops->func = func;
         tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
         mutex_lock(&ftrace_lock);
 
-        if (list_empty(&ftrace_pids) && (!*pos))
+        if (!ftrace_pids_enabled() && (!*pos))
                 return (void *) 1;
 
         return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
         .func                   = ftrace_stub,
         .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
                                   FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_PID |
                                   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
         .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
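The user-visible knob this repairs is the set_ftrace_pid file in tracefs: writing a pid there (for example, echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid) again limits function tracing to that task, and emptying the file restores tracing of all tasks, since PID-flagged ops now route through ftrace_pid_func().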
