---
r: 140729
b: refs/heads/master
c: 00f62f6
h: refs/heads/master
i:
  140727: 0981377
v: v3
Arnaldo Carvalho de Melo authored and Ingo Molnar committed Feb 11, 2009
1 parent 8553f16 commit 7cb280f
Showing 9 changed files with 52 additions and 59 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: e7669b8e329255bbcb40af65b38e342825d97a46
refs/heads/master: 00f62f614bb713027b9296068d1879fbca511eb7
9 changes: 5 additions & 4 deletions trunk/arch/x86/kernel/ftrace.c
@@ -468,8 +468,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
* ignore such a protection.
*/
asm volatile(
"1: " _ASM_MOV " (%[parent]), %[old]\n"
"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
" movl $0, %[faulted]\n"
"3:\n"

@@ -481,8 +481,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 4b)

: [old] "=r" (old), [faulted] "=r" (faulted)
: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: [parent_replaced] "=r" (parent), [old] "=r" (old),
[faulted] "=r" (faulted)
: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);

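The second hunk above changes how the parent pointer is passed into the inline asm: instead of a plain "r" input named [parent], it becomes an output operand ([parent_replaced] "=r") tied back to an input through the matching constraint "0" ([parent_old]), telling gcc that the asm both reads and rewrites that register. A minimal, x86-only user-space sketch of the same constraint pattern follows; swap_in_place() and its operand names are hypothetical and this is not the kernel code itself.

#include <stdio.h>

/*
 * Sketch of the matching-constraint idiom used above: the pointer is an
 * output ("=r") and is fed back in through "0", so gcc allocates a single
 * register that the asm may consume and then overwrite.
 */
static unsigned long swap_in_place(unsigned long *slot, unsigned long new_val)
{
        unsigned long old;

        asm volatile(
                "mov (%[ptr]), %[old]\n\t"      /* old = *slot     */
                "mov %[new], (%[ptr])\n\t"      /* *slot = new_val */
                : [ptr] "=r" (slot), [old] "=r" (old)
                : "0" (slot), [new] "r" (new_val)
                : "memory");

        return old;
}

int main(void)
{
        unsigned long v = 1;
        unsigned long old = swap_in_place(&v, 2);

        printf("old=%lu new=%lu\n", old, v);    /* old=1 new=2 */
        return 0;
}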
2 changes: 1 addition & 1 deletion trunk/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
struct ring_buffer_iter;

/*
* Don't refer to this struct directly, use functions below.
* Don't reference this struct directly, use functions below.
*/
struct ring_buffer_event {
u32 type:2, len:3, time_delta:27;
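The context line above shows the event header packing its type, length, and time delta into a single 32-bit word (2 + 3 + 27 bits). A user-space stand-in with the same bit widths, just to illustrate the layout; event_header is a hypothetical name, not the kernel struct.

#include <stdint.h>
#include <stdio.h>

/* Same bit widths as ring_buffer_event: 2 + 3 + 27 = 32 bits. */
struct event_header {
        uint32_t type:2, len:3, time_delta:27;
};

int main(void)
{
        /* On common ABIs the three bit-fields share one 32-bit unit. */
        printf("sizeof(struct event_header) = %zu\n",
               sizeof(struct event_header));    /* prints 4 */
        return 0;
}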
56 changes: 25 additions & 31 deletions trunk/kernel/trace/ring_buffer.c
@@ -59,7 +59,7 @@ enum {
RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/**
* tracing_on - enable all tracing buffers
@@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(tracing_off);
* tracing_off_permanent - permanently disable ring buffers
*
* This function, once called, will disable all ring buffers
* permanently.
* permanenty.
*/
void tracing_off_permanent(void)
{
@@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);

struct buffer_data_page {
u64 time_stamp; /* page time stamp */
local_t commit; /* write committed index */
local_t commit; /* write commited index */
unsigned char data[]; /* data of buffer page */
};

@@ -260,7 +260,7 @@ struct ring_buffer_per_cpu {
struct list_head pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *commit_page; /* commited pages */
struct buffer_page *reader_page;
unsigned long overrun;
unsigned long entries;
@@ -273,8 +273,8 @@ struct ring_buffer {
unsigned pages;
unsigned flags;
int cpus;
cpumask_var_t cpumask;
atomic_t record_disabled;
cpumask_var_t cpumask;

struct mutex mutex;

@@ -303,7 +303,7 @@ struct ring_buffer_iter {
* check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
*
* As a safety measure we check to make sure the data pages have not
* As a safty measure we check to make sure the data pages have not
* been corrupted.
*/
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2332,14 +2332,13 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);

static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_data_page *bpage,
unsigned int offset)
struct buffer_data_page *bpage)
{
struct ring_buffer_event *event;
unsigned long head;

__raw_spin_lock(&cpu_buffer->lock);
for (head = offset; head < local_read(&bpage->commit);
for (head = 0; head < local_read(&bpage->commit);
head += rb_event_length(event)) {

event = __rb_data_page_index(bpage, head);
@@ -2407,12 +2406,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* to swap with a page in the ring buffer.
*
* for example:
* rpage = ring_buffer_alloc_read_page(buffer);
* rpage = ring_buffer_alloc_page(buffer);
* if (!rpage)
* return error;
* ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
* if (ret >= 0)
* process_page(rpage, ret);
* if (ret)
* process_page(rpage);
*
* When @full is set, the function will not return true unless
* the writer is off the reader page.
@@ -2423,8 +2422,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* responsible for that.
*
* Returns:
* >=0 if data has been transferred, returns the offset of consumed data.
* <0 if no data has been transferred.
* 1 if data has been transferred
* 0 if no data has been transferred.
*/
int ring_buffer_read_page(struct ring_buffer *buffer,
void **data_page, int cpu, int full)
@@ -2433,8 +2432,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
struct ring_buffer_event *event;
struct buffer_data_page *bpage;
unsigned long flags;
unsigned int read;
int ret = -1;
int ret = 0;

if (!data_page)
return 0;
@@ -2456,29 +2454,25 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
/* check for data */
if (!local_read(&cpu_buffer->reader_page->page->commit))
goto out;

read = cpu_buffer->reader_page->read;
/*
* If the writer is already off of the read page, then simply
* switch the read page with the given page. Otherwise
* we need to copy the data from the reader to the writer.
*/
if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
unsigned int read = cpu_buffer->reader_page->read;

if (full)
goto out;
/* The writer is still on the reader page, we must copy */
memcpy(bpage->data + read, rpage->data + read, commit - read);
bpage = cpu_buffer->reader_page->page;
memcpy(bpage->data,
cpu_buffer->reader_page->page->data + read,
local_read(&bpage->commit) - read);

/* consume what was read */
cpu_buffer->reader_page->read = commit;
cpu_buffer->reader_page += read;

/* update bpage */
local_set(&bpage->commit, commit);
if (!read)
bpage->time_stamp = rpage->time_stamp;
} else {
/* swap the pages */
rb_init_page(bpage);
@@ -2487,10 +2481,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
cpu_buffer->reader_page->read = 0;
*data_page = bpage;
}
ret = read;
ret = 1;

/* update the entry counter */
rb_remove_entries(cpu_buffer, bpage, read);
rb_remove_entries(cpu_buffer, bpage);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

@@ -2501,7 +2495,7 @@ static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *p = filp->private_data;
long *p = filp->private_data;
char buf[64];
int r;

@@ -2517,9 +2511,9 @@ static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *p = filp->private_data;
long *p = filp->private_data;
char buf[64];
unsigned long val;
long val;
int ret;

if (cnt >= sizeof(buf))
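The kernel-doc comment changed above spells out the intended calling sequence for the read-page interface. A hedged sketch of a consumer following that sequence is below; consume_one_page() and process_page() are hypothetical, the allocator name follows the ring_buffer_alloc_read_page() spelling used on one side of the comment, and the return-value test depends on which side of this change a tree carries (1/0 here, versus a consumed offset or -1 in the replaced version).

#include <linux/errno.h>
#include <linux/ring_buffer.h>

/* Sketch only, modelled on the usage shown in the comment above. */
static int consume_one_page(struct ring_buffer *buffer, int cpu,
                            void (*process_page)(void *data))
{
        void *rpage;
        int ret;

        rpage = ring_buffer_alloc_read_page(buffer);
        if (!rpage)
                return -ENOMEM;

        /* full == 0: accept a partially filled reader page as well */
        ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
        if (ret)
                process_page(rpage);

        ring_buffer_free_read_page(buffer, rpage);
        return ret;
}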
20 changes: 8 additions & 12 deletions trunk/kernel/trace/trace.c
@@ -80,7 +80,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
* of the tracer is successful. But that is the only place that sets
* this back to zero.
*/
static int tracing_disabled = 1;
int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

@@ -459,8 +459,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
* Register a new plugin tracer.
*/
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
struct tracer *t;
int len;
@@ -628,7 +626,7 @@ static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
@@ -985,12 +983,10 @@ static void ftrace_trace_userstack(struct trace_array *tr,
#endif
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

static void
ftrace_trace_special(void *__tr,
@@ -1724,7 +1720,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
struct trace_iterator *iter = m->private;
@@ -1967,7 +1963,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
struct tracer_opt *trace_opts = current_trace->flags->opts;


/* calculate max size */
/* calulate max size */
for (i = 0; trace_options[i]; i++) {
len += strlen(trace_options[i]);
len += 3; /* "no" and space */
@@ -2149,7 +2145,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
{
struct trace_array *tr = filp->private_data;
char buf[64];
unsigned long val;
long val;
int ret;

if (cnt >= sizeof(buf))
@@ -2297,9 +2293,9 @@ static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
long *ptr = filp->private_data;
char buf[64];
unsigned long val;
long val;
int ret;

if (cnt >= sizeof(buf))
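Several hunks above touch debugfs write handlers that share one bounded-parse shape: refuse a write that cannot fit in a small stack buffer, copy the bytes in, NUL-terminate, then convert to an integer. A user-space sketch of that shape follows; parse_small_write() is hypothetical and stands in for handlers such as rb_simple_write() and tracing_ctrl_write(), which use copy_from_user() and a kernel string-conversion helper instead of memcpy()/strtol().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the "bound-check, copy, terminate, parse" idiom. */
static int parse_small_write(const char *ubuf, size_t cnt, long *val)
{
        char buf[64];
        char *end;

        if (cnt >= sizeof(buf))         /* refuse anything that cannot fit */
                return -EINVAL;

        memcpy(buf, ubuf, cnt);         /* the kernel uses copy_from_user() */
        buf[cnt] = '\0';

        errno = 0;
        *val = strtol(buf, &end, 10);
        if (errno || end == buf)
                return -EINVAL;
        return 0;
}

int main(void)
{
        long val;
        const char *input = "42\n";

        if (!parse_small_write(input, strlen(input), &val))
                printf("parsed %ld\n", val);    /* parsed 42 */
        return 0;
}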
4 changes: 3 additions & 1 deletion trunk/kernel/trace/trace_branch.c
@@ -91,6 +91,8 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)

int enable_branch_tracing(struct trace_array *tr)
{
int ret = 0;

mutex_lock(&branch_tracing_mutex);
branch_tracer = tr;
/*
@@ -101,7 +103,7 @@ int enable_branch_tracing(struct trace_array *tr)
branch_tracing_enabled++;
mutex_unlock(&branch_tracing_mutex);

return 0;
return ret;
}

void disable_branch_tracing(void)
14 changes: 7 additions & 7 deletions trunk/kernel/trace/trace_functions_graph.c
@@ -186,30 +186,30 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
ret = trace_seq_printf(s,
" ------------------------------------------\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
TRACE_TYPE_PARTIAL_LINE;

ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
TRACE_TYPE_PARTIAL_LINE;

ret = print_graph_proc(s, prev_pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
TRACE_TYPE_PARTIAL_LINE;

ret = trace_seq_printf(s, " => ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
TRACE_TYPE_PARTIAL_LINE;

ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
TRACE_TYPE_PARTIAL_LINE;

ret = trace_seq_printf(s,
"\n ------------------------------------------\n\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
TRACE_TYPE_PARTIAL_LINE;

return TRACE_TYPE_HANDLED;
return ret;
}

static struct ftrace_graph_ret_entry *
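On the incoming side of this hunk each early `return TRACE_TYPE_PARTIAL_LINE;` becomes a bare `TRACE_TYPE_PARTIAL_LINE;`. Because TRACE_TYPE_PARTIAL_LINE is an enum value, that line is an expression statement with no effect, so control simply falls through to the final `return ret;`. A tiny sketch of the behavioural difference, with hypothetical names:

#include <stdio.h>

enum outcome { HANDLED = 0, PARTIAL_LINE = 1 };

/* Bails out as soon as a step fails. */
static enum outcome with_return(int step_ok)
{
        if (!step_ok)
                return PARTIAL_LINE;    /* early exit */
        return HANDLED;
}

/*
 * The bare enum constant is a statement with no effect (gcc -Wall warns
 * "statement with no effect"), so the failure is never reported.
 */
static enum outcome without_return(int step_ok)
{
        enum outcome ret = HANDLED;

        if (!step_ok)
                PARTIAL_LINE;           /* no-op */
        return ret;
}

int main(void)
{
        printf("with_return: %d, without_return: %d\n",
               with_return(0), without_return(0));      /* prints 1, 0 */
        return 0;
}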
2 changes: 1 addition & 1 deletion trunk/kernel/trace/trace_hw_branches.c
@@ -75,7 +75,7 @@ static void bts_trace_start(struct trace_array *tr)
}

/*
* Stop tracing on the current cpu.
* Start tracing on the current cpu.
* The argument is ignored.
*
* pre: bts_tracer_mutex must be locked.
2 changes: 1 addition & 1 deletion trunk/kernel/trace/trace_sysprof.c
@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
}
}

static const struct stacktrace_ops backtrace_ops = {
const static struct stacktrace_ops backtrace_ops = {
.warning = backtrace_warning,
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
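The only change in this file is the specifier order on backtrace_ops: `static const` versus `const static`. Both orders are valid C, but placing the storage-class specifier anywhere other than first is an obsolescent feature in C99, and kernel style prefers `static const`. A tiny illustration, with a hypothetical struct:

#include <stdio.h>

struct ops { int x; };

/* Both compile; gcc -Wextra flags the second one via
 * -Wold-style-declaration ("'static' is not at beginning of declaration"). */
static const struct ops good_order = { .x = 1 };
const static struct ops odd_order  = { .x = 2 };

int main(void)
{
        printf("%d %d\n", good_order.x, odd_order.x);   /* prints 1 2 */
        return 0;
}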
