
Commit

Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
Ingo Molnar committed Jul 5, 2011
2 parents 5d67be9 + 1fd8df2 commit 931da61
Showing 18 changed files with 333 additions and 321 deletions.
2 changes: 1 addition & 1 deletion arch/x86/kernel/stacktrace.c
@@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace)
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
-void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 {
 	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
 	if (trace->nr_entries < trace->max_entries)
2 changes: 1 addition & 1 deletion arch/x86/mm/kmemcheck/error.c
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
 	e->trace.entries = e->trace_entries;
 	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
 	e->trace.skip = 0;
-	save_stack_trace_regs(&e->trace, regs);
+	save_stack_trace_regs(regs, &e->trace);
 
 	/* Round address down to nearest 16 bytes */
 	shadow_copy = kmemcheck_shadow_lookup(address
4 changes: 4 additions & 0 deletions include/linux/ftrace_event.h
@@ -129,6 +129,10 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
 				       struct ring_buffer_event *event,
 				       unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+					    struct ring_buffer_event *event,
+					    unsigned long flags, int pc,
+					    struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event);
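The new _regs variant threads a struct pt_regs through to the commit path, presumably so a stack trace recorded at commit time can unwind from the supplied register state rather than from the current frame. A hypothetical call site mirroring the existing non-regs helper (the wrapper name is made up; only the two prototypes above are from this diff):

/* Sketch: commit a reserved event, passing saved registers along
 * for stack-trace recording when they are available. */
static void my_trace_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    unsigned long flags, int pc,
			    struct pt_regs *regs)
{
	if (regs)
		trace_nowake_buffer_unlock_commit_regs(buffer, event,
						       flags, pc, regs);
	else
		trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
}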
2 changes: 1 addition & 1 deletion include/linux/ring_buffer.h
@@ -169,7 +169,7 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
 size_t ring_buffer_page_len(void *page);
 
 
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
 int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
 			  size_t len, int cpu, int full);
4 changes: 2 additions & 2 deletions include/linux/stacktrace.h
@@ -14,8 +14,8 @@ struct stack_trace {
 };
 
 extern void save_stack_trace(struct stack_trace *trace);
-extern void save_stack_trace_regs(struct stack_trace *trace,
-				  struct pt_regs *regs);
+extern void save_stack_trace_regs(struct pt_regs *regs,
+				  struct stack_trace *trace);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
 				 struct stack_trace *trace);
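The swap puts the context argument first, matching save_stack_trace_tsk() directly above it. A minimal caller sketch under the new ordering (the entry-array depth and the helper name are illustrative, not from this diff):

#include <linux/stacktrace.h>

static unsigned long trace_entries[32];	/* arbitrary example depth */

static void capture_from_regs(struct pt_regs *regs)
{
	struct stack_trace trace = {
		.entries	= trace_entries,
		.max_entries	= ARRAY_SIZE(trace_entries),
		.skip		= 0,
	};

	save_stack_trace_regs(regs, &trace);	/* regs first, as of this merge */
	print_stack_trace(&trace, 0);
}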
12 changes: 7 additions & 5 deletions kernel/async.c
@@ -49,12 +49,13 @@ asynchronous and synchronous parts of the kernel.
  */
 
 #include <linux/async.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <asm/atomic.h>
 
 static async_cookie_t next_cookie = 1;
 
@@ -128,15 +129,16 @@ static void async_run_entry_fn(struct work_struct *work)
 
 	/* 2) run (and print duration) */
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
-		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
+			(long long)entry->cookie,
 			entry->func, task_pid_nr(current));
 		calltime = ktime_get();
 	}
 	entry->func(entry->data, entry->cookie);
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		rettime = ktime_get();
 		delta = ktime_sub(rettime, calltime);
-		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
 			(long long)entry->cookie,
 			entry->func,
 			(long long)ktime_to_ns(delta) >> 10);
@@ -270,7 +272,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
 	ktime_t starttime, delta, endtime;
 
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
-		printk("async_waiting @ %i\n", task_pid_nr(current));
+		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
 		starttime = ktime_get();
 	}
 
@@ -280,7 +282,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
 		endtime = ktime_get();
 		delta = ktime_sub(endtime, starttime);
 
-		printk("async_continuing @ %i after %lli usec\n",
+		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
 			task_pid_nr(current),
 			(long long)ktime_to_ns(delta) >> 10);
 	}
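A printk() with no level prefix is logged at the default message loglevel (typically KERN_WARNING) and so tends to reach the console; tagging these boot-timing messages KERN_DEBUG keeps them quiet unless debug-level logging is enabled. For reference, the two common spellings (a generic illustration, not part of this diff; pr_debug() additionally compiles away unless DEBUG or dynamic debug is enabled):

printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
pr_debug("async_waiting @ %i\n", task_pid_nr(current));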
12 changes: 9 additions & 3 deletions kernel/stacktrace.c
@@ -26,12 +26,18 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
 /*
- * Architectures that do not implement save_stack_trace_tsk get this
- * weak alias and a once-per-bootup warning (whenever this facility
- * is utilized - for example by procfs):
+ * Architectures that do not implement save_stack_trace_tsk or
+ * save_stack_trace_regs get this weak alias and a once-per-bootup warning
+ * (whenever this facility is utilized - for example by procfs):
  */
 __weak void
 save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
 }
+
+__weak void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
+}
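__weak makes these stubs weak symbols: any architecture that provides a regular (strong) definition with the same signature wins at link time, and the warning stub is dropped. In sketch form, with the two definitions living in separate objects (the x86 body is taken from the arch/x86/kernel/stacktrace.c hunk above; its final sentinel line is reconstructed from the usual stack-trace idiom, since the diff is truncated there):

/* kernel/stacktrace.c: generic fallback, used only when no
 * architecture supplies its own definition. */
__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	WARN_ONCE(1, "save_stack_trace_regs() not implemented yet.\n");
}

/* arch/x86/kernel/stacktrace.c: strong definition; the linker
 * prefers it over the weak stub. */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}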
20 changes: 4 additions & 16 deletions kernel/trace/ftrace.c
@@ -32,7 +32,6 @@
 
 #include <trace/events/sched.h>
 
-#include <asm/ftrace.h>
 #include <asm/setup.h>
 
 #include "trace_output.h"
@@ -82,8 +81,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops ftrace_list_end __read_mostly =
-{
+static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func = ftrace_stub,
 };
 
@@ -785,8 +783,7 @@ static void unregister_ftrace_profiler(void)
 	unregister_ftrace_graph();
 }
 #else
-static struct ftrace_ops ftrace_profile_ops __read_mostly =
-{
+static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func = function_profile_call,
 };
 
@@ -806,19 +803,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 		     size_t cnt, loff_t *ppos)
 {
 	unsigned long val;
-	char buf[64];		/* big enough to hold a number */
 	int ret;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (ret < 0)
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
 		return ret;
 
 	val = !!val;
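kstrtoul_from_user() (from lib/kstrtox.c) bounces the user buffer into the kernel, NUL-terminates it, and parses it, all in one call, replacing the open-coded copy_from_user()/strict_strtoul() sequence above. A sketch of the resulting write-handler shape (the handler and variable names are made up for illustration):

static int example_enabled;

static ssize_t
example_enable_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	/* Copies at most cnt bytes, NUL-terminates, parses base 10;
	 * returns 0 on success or a -errno such as -EINVAL or -EFAULT. */
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	example_enabled = !!val;	/* collapse to boolean */

	*ppos += cnt;
	return cnt;
}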
66 changes: 36 additions & 30 deletions kernel/trace/ring_buffer.c
@@ -997,26 +997,33 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 			     unsigned nr_pages)
 {
 	struct buffer_page *bpage, *tmp;
-	unsigned long addr;
 	LIST_HEAD(pages);
 	unsigned i;
 
 	WARN_ON(!nr_pages);
 
 	for (i = 0; i < nr_pages; i++) {
+		struct page *page;
+		/*
+		 * __GFP_NORETRY flag makes sure that the allocation fails
+		 * gracefully without invoking oom-killer and the system is
+		 * not destabilized.
+		 */
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+				    GFP_KERNEL | __GFP_NORETRY,
+				    cpu_to_node(cpu_buffer->cpu));
 		if (!bpage)
 			goto free_pages;
 
 		rb_check_bpage(cpu_buffer, bpage);
 
 		list_add(&bpage->list, &pages);
 
-		addr = __get_free_page(GFP_KERNEL);
-		if (!addr)
+		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+					GFP_KERNEL | __GFP_NORETRY, 0);
+		if (!page)
 			goto free_pages;
-		bpage->page = (void *)addr;
+		bpage->page = page_address(page);
 		rb_init_page(bpage->page);
 	}
 
@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long addr;
+	struct page *page;
 	int ret;
 
 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	rb_check_bpage(cpu_buffer, bpage);
 
 	cpu_buffer->reader_page = bpage;
-	addr = __get_free_page(GFP_KERNEL);
-	if (!addr)
+	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+	if (!page)
 		goto fail_free_reader;
-	bpage->page = (void *)addr;
+	bpage->page = page_address(page);
 	rb_init_page(bpage->page);
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	unsigned nr_pages, rm_pages, new_pages;
 	struct buffer_page *bpage, *tmp;
 	unsigned long buffer_size;
-	unsigned long addr;
 	LIST_HEAD(pages);
 	int i, cpu;
 
@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
+			struct page *page;
+			/*
+			 * __GFP_NORETRY flag makes sure that the allocation
+			 * fails gracefully without invoking oom-killer and
+			 * the system is not destabilized.
+			 */
 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
 						   cache_line_size()),
-					    GFP_KERNEL, cpu_to_node(cpu));
+					    GFP_KERNEL | __GFP_NORETRY,
+					    cpu_to_node(cpu));
 			if (!bpage)
 				goto free_pages;
 			list_add(&bpage->list, &pages);
-			addr = __get_free_page(GFP_KERNEL);
-			if (!addr)
+			page = alloc_pages_node(cpu_to_node(cpu),
+						GFP_KERNEL | __GFP_NORETRY, 0);
+			if (!page)
 				goto free_pages;
-			bpage->page = (void *)addr;
+			bpage->page = page_address(page);
 			rb_init_page(bpage->page);
 		}
 	}
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or NULL on error.
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
 	struct buffer_data_page *bpage;
-	unsigned long addr;
+	struct page *page;
 
-	addr = __get_free_page(GFP_KERNEL);
-	if (!addr)
+	page = alloc_pages_node(cpu_to_node(cpu),
+				GFP_KERNEL | __GFP_NORETRY, 0);
+	if (!page)
 		return NULL;
 
-	bpage = (void *)addr;
+	bpage = page_address(page);
 
 	rb_init_page(bpage);
 
@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
 	unsigned long *p = filp->private_data;
-	char buf[64];
 	unsigned long val;
 	int ret;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (ret < 0)
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
 		return ret;
 
 	if (val)
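The allocation pattern repeated through this file, node-local pages with __GFP_NORETRY so the allocator fails fast instead of retrying into the OOM killer, reduces to the following in isolation (a sketch; the helper name is illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

/* Sketch: allocate one page on the NUMA node of the given CPU,
 * tolerating failure under memory pressure rather than triggering
 * the OOM killer. */
static void *alloc_local_page(int cpu)
{
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;		/* caller must handle failure */

	return page_address(page);	/* kernel virtual address */
}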
2 changes: 1 addition & 1 deletion kernel/trace/ring_buffer_benchmark.c
@@ -106,7 +106,7 @@ static enum event_status read_page(int cpu)
 	int inc;
 	int i;
 
-	bpage = ring_buffer_alloc_read_page(buffer);
+	bpage = ring_buffer_alloc_read_page(buffer, cpu);
 	if (!bpage)
 		return EVENT_DROPPED;
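With the extra cpu argument, the benchmark's read page now comes from the node local to the CPU whose buffer it shadows. A hedged sketch of the full reader-page cycle, using only the signatures visible in the include/linux/ring_buffer.h hunk above (the function name and error handling are illustrative):

/* Sketch: read up to one page of events from 'cpu' of a ring buffer. */
static int read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *bpage;
	int ret;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);	/* node-local */
	if (!bpage)
		return -ENOMEM;

	/* Fills bpage with events; a negative return means nothing
	 * could be read. */
	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 0);

	ring_buffer_free_read_page(buffer, bpage);
	return ret < 0 ? ret : 0;
}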
(Diff truncated: the remaining 8 of the 18 changed files were not rendered.)
