Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/urgent
Ingo Molnar committed May 27, 2011
2 parents b1d2dc3 + b1cff0a commit d6a72fe
Showing 17 changed files with 148 additions and 41 deletions.
4 changes: 2 additions & 2 deletions arch/blackfin/mm/maccess.c
@@ -16,7 +16,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
return bfin_mem_access_type(addr, size);
}

-long probe_kernel_read(void *dst, void *src, size_t size)
+long probe_kernel_read(void *dst, const void *src, size_t size)
{
unsigned long lsrc = (unsigned long)src;
int mem_type;
@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
return -EFAULT;
}

-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
unsigned long ldst = (unsigned long)dst;
int mem_type;
4 changes: 2 additions & 2 deletions arch/s390/mm/maccess.c
@@ -19,7 +19,7 @@
* using the stura instruction.
* Returns the number of bytes copied or -EFAULT.
*/
-static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
unsigned long count, aligned;
int offset, mask;
@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
return rc ? rc : count;
}

-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
long copied = 0;

12 changes: 6 additions & 6 deletions arch/x86/kernel/ftrace.c
@@ -123,7 +123,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status; /* holds return value of text write */
static void *mod_code_ip; /* holds the IP to write to */
-static void *mod_code_newcode; /* holds the text to write to the IP */
+static const void *mod_code_newcode; /* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);
@@ -225,7 +225,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
}

static int
-do_ftrace_mod_code(unsigned long ip, void *new_code)
+do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
@@ -266,8 +266,8 @@ static const unsigned char *ftrace_nop_replace(void)
}

static int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
-unsigned char *new_code)
+ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
+unsigned const char *new_code)
{
unsigned char replaced[MCOUNT_INSN_SIZE];

@@ -301,7 +301,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
-unsigned char *new, *old;
+unsigned const char *new, *old;
unsigned long ip = rec->ip;

old = ftrace_call_replace(ip, addr);
@@ -312,7 +312,7 @@ int ftrace_make_nop(struct module *mod,

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
-unsigned char *new, *old;
+unsigned const char *new, *old;
unsigned long ip = rec->ip;

old = ftrace_nop_replace();
12 changes: 12 additions & 0 deletions include/linux/ftrace_event.h
@@ -16,13 +16,25 @@ struct trace_print_flags {
const char *name;
};

+struct trace_print_flags_u64 {
+unsigned long long mask;
+const char *name;
+};
+
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
unsigned long flags,
const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
const struct trace_print_flags *symbol_array);

+#if BITS_PER_LONG == 32
+const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
+unsigned long long val,
+const struct trace_print_flags_u64
+*symbol_array);
+#endif
+
const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);

2 changes: 1 addition & 1 deletion include/linux/sched.h
@@ -1535,7 +1535,7 @@ struct task_struct {
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
-/* bitmask of trace recursion */
+/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
8 changes: 4 additions & 4 deletions include/linux/uaccess.h
@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long probe_kernel_read(void *dst, void *src, size_t size);
-extern long __probe_kernel_read(void *dst, void *src, size_t size);
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long __probe_kernel_read(void *dst, const void *src, size_t size);

/*
* probe_kernel_write(): safely attempt to write to a location
@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, void *src, size_t size);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

#endif /* __LINUX_UACCESS_H__ */
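For context, a minimal sketch of a caller against the const-qualified prototypes above (illustrative only, not part of this commit; the helper name peek_kernel_ulong is hypothetical):

static long peek_kernel_ulong(unsigned long *dst, const unsigned long *src)
{
	/* Returns 0 on success or -EFAULT if the kernel address faults;
	 * a pointer-to-const source now needs no cast. */
	return probe_kernel_read(dst, src, sizeof(*dst));
}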
4 changes: 2 additions & 2 deletions include/trace/events/btrfs.h
@@ -28,7 +28,7 @@ struct extent_buffer;
{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })

#define __show_root_type(obj) \
-__print_symbolic(obj, \
+__print_symbolic_u64(obj, \
{ BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
{ BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
{ BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
@@ -125,7 +125,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
);

#define __show_map_type(type) \
-__print_symbolic(type, \
+__print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
{ EXTENT_MAP_INLINE, "INLINE" }, \
13 changes: 13 additions & 0 deletions include/trace/ftrace.h
@@ -205,6 +205,19 @@
ftrace_print_symbols_seq(p, value, symbols); \
})

+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_symbolic_u64(value, symbol_array...) \
+({ \
+static const struct trace_print_flags_u64 symbols[] = \
+{ symbol_array, { -1, NULL } }; \
+ftrace_print_symbols_seq_u64(p, value, symbols); \
+})
+#else
+#define __print_symbolic_u64(value, symbol_array...) \
+__print_symbolic(value, symbol_array)
+#endif
+
#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

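To see what the 32-bit branch is doing, here is a sketch of the { mask, name } table walk that ftrace_print_symbols_seq_u64() presumably performs (its implementation lives in one of the changed files not shown here; the names and structure below are an assumption, not code from this commit):

static const char *lookup_symbol_u64(unsigned long long val,
				     const struct trace_print_flags_u64 *tbl)
{
	/* The symbols[] array built by the macro is terminated by { -1, NULL },
	 * so a NULL name marks the end of the table. */
	for (; tbl->name; tbl++)
		if (tbl->mask == val)
			return tbl->name;
	return NULL;	/* caller would fall back to printing the raw value */
}

On 64-bit kernels the macro simply falls back to __print_symbolic(), since unsigned long is already 64 bits wide there.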
18 changes: 13 additions & 5 deletions kernel/jump_label.c
@@ -105,9 +105,12 @@ static int __jump_label_text_reserved(struct jump_entry *iter_start,
}

static void __jump_label_update(struct jump_label_key *key,
-struct jump_entry *entry, int enable)
+struct jump_entry *entry,
+struct jump_entry *stop, int enable)
{
-for (; entry->key == (jump_label_t)(unsigned long)key; entry++) {
+for (; (entry < stop) &&
+(entry->key == (jump_label_t)(unsigned long)key);
+entry++) {
/*
* entry->code set to 0 invalidates module init text sections
* kernel_text_address() verifies we are not in core kernel
@@ -181,7 +184,11 @@ static void __jump_label_mod_update(struct jump_label_key *key, int enable)
struct jump_label_mod *mod = key->next;

while (mod) {
-__jump_label_update(key, mod->entries, enable);
+struct module *m = mod->mod;
+
+__jump_label_update(key, mod->entries,
+m->jump_entries + m->num_jump_entries,
+enable);
mod = mod->next;
}
}
@@ -245,7 +252,8 @@ static int jump_label_add_module(struct module *mod)
key->next = jlm;

if (jump_label_enabled(key))
-__jump_label_update(key, iter, JUMP_LABEL_ENABLE);
+__jump_label_update(key, iter, iter_stop,
+JUMP_LABEL_ENABLE);
}

return 0;
@@ -371,7 +379,7 @@ static void jump_label_update(struct jump_label_key *key, int enable)

/* if there are no users, entry can be NULL */
if (entry)
-__jump_label_update(key, entry, enable);
+__jump_label_update(key, entry, __stop___jump_table, enable);

#ifdef CONFIG_MODULES
__jump_label_mod_update(key, enable);
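The new stop argument bounds the table walk: previously the loop relied only on the key comparison to terminate, which could run past a module's last jump entry. A condensed sketch of the bounded-walk idiom introduced above (update_one() is a stand-in for the real patching code, not a function from this commit):

for (entry = start;
     entry < stop && entry->key == (jump_label_t)(unsigned long)key;
     entry++)
	update_one(entry);	/* hypothetical per-entry helper */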
31 changes: 24 additions & 7 deletions kernel/trace/ftrace.c
@@ -109,12 +109,18 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
static void ftrace_global_list_func(unsigned long ip,
unsigned long parent_ip)
{
-struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
+struct ftrace_ops *op;

+if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+return;
+
+trace_recursion_set(TRACE_GLOBAL_BIT);
+op = rcu_dereference_raw(ftrace_global_list); /*see above*/
while (op != &ftrace_list_end) {
op->func(ip, parent_ip);
op = rcu_dereference_raw(op->next); /*see above*/
};
+trace_recursion_clear(TRACE_GLOBAL_BIT);
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
@@ -1638,12 +1644,12 @@ static void ftrace_startup_enable(int command)
ftrace_run_update_code(command);
}

-static void ftrace_startup(struct ftrace_ops *ops, int command)
+static int ftrace_startup(struct ftrace_ops *ops, int command)
{
bool hash_enable = true;

if (unlikely(ftrace_disabled))
-return;
+return -ENODEV;

ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;
@@ -1662,6 +1668,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command)
ftrace_hash_rec_enable(ops, 1);

ftrace_startup_enable(command);
+
+return 0;
}

static void ftrace_shutdown(struct ftrace_ops *ops, int command)
@@ -2501,7 +2509,7 @@ static void __enable_ftrace_function_probe(void)

ret = __register_ftrace_function(&trace_probe_ops);
if (!ret)
-ftrace_startup(&trace_probe_ops, 0);
+ret = ftrace_startup(&trace_probe_ops, 0);

ftrace_probe_registered = 1;
}
@@ -3466,7 +3474,11 @@ device_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command) do { } while (0)
+# define ftrace_startup(ops, command) \
+({ \
+(ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+0; \
+})
# define ftrace_shutdown(ops, command) do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
@@ -3484,6 +3496,10 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op;

+if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+return;
+
+trace_recursion_set(TRACE_INTERNAL_BIT);
/*
* Some of the ops may be dynamically allocated,
* they must be freed after a synchronize_sched().
@@ -3496,6 +3512,7 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
op = rcu_dereference_raw(op->next);
};
preempt_enable_notrace();
+trace_recursion_clear(TRACE_INTERNAL_BIT);
}

static void clear_ftrace_swapper(void)
@@ -3799,7 +3816,7 @@ int register_ftrace_function(struct ftrace_ops *ops)

ret = __register_ftrace_function(ops);
if (!ret)
-ftrace_startup(ops, 0);
+ret = ftrace_startup(ops, 0);


out_unlock:
@@ -4045,7 +4062,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_graph_return = retfunc;
ftrace_graph_entry = entryfunc;

-ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);

out:
mutex_unlock(&ftrace_lock);
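Since ftrace_startup() now returns an int (-ENODEV when ftrace is disabled), its callers can propagate the failure instead of silently ignoring it. A condensed sketch of the caller pattern the hunks above switch to:

ret = __register_ftrace_function(ops);
if (!ret)
	ret = ftrace_startup(ops, 0);	/* may now fail with -ENODEV */
return ret;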
10 changes: 5 additions & 5 deletions kernel/trace/ring_buffer.c
@@ -2216,7 +2216,7 @@ static noinline void trace_recursive_fail(void)

printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
"HC[%lu]:SC[%lu]:NMI[%lu]\n",
-current->trace_recursion,
+trace_recursion_buffer(),
hardirq_count() >> HARDIRQ_SHIFT,
softirq_count() >> SOFTIRQ_SHIFT,
in_nmi());
@@ -2226,9 +2226,9 @@ static noinline void trace_recursive_fail(void)

static inline int trace_recursive_lock(void)
{
-current->trace_recursion++;
+trace_recursion_inc();

-if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
return 0;

trace_recursive_fail();
@@ -2238,9 +2238,9 @@ static inline int trace_recursive_lock(void)

static inline void trace_recursive_unlock(void)
{
-WARN_ON_ONCE(!current->trace_recursion);
+WARN_ON_ONCE(!trace_recursion_buffer());

-current->trace_recursion--;
+trace_recursion_dec();
}

#else
15 changes: 15 additions & 0 deletions kernel/trace/trace.h
@@ -784,4 +784,19 @@ extern const char *__stop___trace_bprintk_fmt[];
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"

+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT (1<<11)
+#define TRACE_GLOBAL_BIT (1<<12)
+
+#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
+
#endif /* _LINUX_KERNEL_TRACE_H */
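A condensed sketch of how the new per-task recursion bits are used (this mirrors the guards added to kernel/trace/ftrace.c above; illustrative only, not additional code from the commit):

if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
	return;				/* already inside the list walker: bail out */
trace_recursion_set(TRACE_INTERNAL_BIT);
/* ... walk the ftrace_ops list and call each handler ... */
trace_recursion_clear(TRACE_INTERNAL_BIT);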
7 changes: 6 additions & 1 deletion kernel/trace/trace_events.c
@@ -1657,7 +1657,12 @@ static struct ftrace_ops trace_ops __initdata =

static __init void event_trace_self_test_with_function(void)
{
-register_ftrace_function(&trace_ops);
+int ret;
+ret = register_ftrace_function(&trace_ops);
+if (WARN_ON(ret < 0)) {
+pr_info("Failed to enable function tracer for event tests\n");
+return;
+}
pr_info("Running tests again, along with the function tracer\n");
event_trace_self_tests();
unregister_ftrace_function(&trace_ops);
(4 more changed files not shown)