Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 140985
b: refs/heads/master
c: f02b862
h: refs/heads/master
i:
  140983: c4d94e0
v: v3
  • Loading branch information
Ananth N Mavinakayanahalli authored and Ingo Molnar committed Mar 18, 2009
1 parent 55a911c commit 4dc7575
Show file tree
Hide file tree
Showing 11 changed files with 54 additions and 145 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 3bf832ce1fe6988148d392599f34ca0c6a34427d
refs/heads/master: f02b8624fedca39886b0eef770dca70c2f0749b3
6 changes: 4 additions & 2 deletions trunk/include/linux/compiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ struct ftrace_branch_data {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};

Expand Down Expand Up @@ -126,7 +125,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
.line = __LINE__, \
}; \
______r = !!(cond); \
______f.miss_hit[______r]++; \
if (______r) \
______f.hit++; \
else \
______f.miss++; \
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
Expand Down
7 changes: 2 additions & 5 deletions trunk/include/linux/ring_buffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -118,11 +118,8 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
int cpu, u64 *ts);
void ring_buffer_set_clock(struct ring_buffer *buffer,
u64 (*clock)(void));
u64 ring_buffer_time_stamp(int cpu);
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);

size_t ring_buffer_page_len(void *page);

Expand Down
4 changes: 1 addition & 3 deletions trunk/kernel/kprobes.c
Original file line number Diff line number Diff line change
Expand Up @@ -919,10 +919,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
ri->rp = rp;
ri->task = current;

if (rp->entry_handler && rp->entry_handler(ri, regs)) {
spin_unlock_irqrestore(&rp->lock, flags);
if (rp->entry_handler && rp->entry_handler(ri, regs))
return 0;
}

arch_prepare_kretprobe(ri, regs);

Expand Down
74 changes: 27 additions & 47 deletions trunk/kernel/trace/ring_buffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,29 @@ EXPORT_SYMBOL_GPL(tracing_is_on);

#include "trace.h"

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/*
 * ring_buffer_time_stamp - read the current trace clock value for @cpu
 *
 * Samples trace_clock_local() with preemption disabled and applies
 * DEBUG_SHIFT so the TIME_EXTENTS/normalization paths can be exercised
 * in testing; with the default DEBUG_SHIFT of 0 this is a plain read.
 */
u64 ring_buffer_time_stamp(int cpu)
{
	u64 ts;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	ts = trace_clock_local() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return ts;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

/*
 * ring_buffer_normalize_time_stamp - undo the DEBUG_SHIFT scaling on @ts
 *
 * Counterpart of ring_buffer_time_stamp(): shifts the stamp back down so
 * a debug-scaled value normalizes to real clock units.  A no-op while
 * DEBUG_SHIFT is 0.
 */
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts = *ts >> DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA 28
Expand Down Expand Up @@ -351,7 +374,6 @@ struct ring_buffer {
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
u64 (*clock)(void);
};

struct ring_buffer_iter {
Expand All @@ -372,30 +394,6 @@ struct ring_buffer_iter {
_____ret; \
})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/*
 * ring_buffer_time_stamp - read the current time stamp of @buffer for @cpu
 *
 * Calls the buffer's installed clock callback (see ring_buffer_set_clock)
 * with preemption disabled, scaled by DEBUG_SHIFT for normalization
 * testing; DEBUG_SHIFT defaults to 0, making this a direct clock read.
 */
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 ts;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	ts = buffer->clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return ts;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

/*
 * ring_buffer_normalize_time_stamp - undo the DEBUG_SHIFT scaling on @ts
 *
 * Inverse of the shift applied by ring_buffer_time_stamp(); a no-op
 * while DEBUG_SHIFT is 0.  @buffer and @cpu are unused here but kept
 * for API symmetry with the stamp-reading side.
 */
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts = *ts >> DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
* check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
Expand Down Expand Up @@ -571,23 +569,13 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)

buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
buffer->clock = trace_clock_local;

/* need at least two pages */
if (buffer->pages == 1)
buffer->pages++;

/*
* In case of non-hotplug cpu, if the ring-buffer is allocated
* in early initcall, it will not be notified of secondary cpus.
* In that off case, we need to allocate for all possible cpus.
*/
#ifdef CONFIG_HOTPLUG_CPU
get_online_cpus();
cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
buffer->cpus = nr_cpu_ids;

bsize = sizeof(void *) * nr_cpu_ids;
Expand Down Expand Up @@ -657,12 +645,6 @@ ring_buffer_free(struct ring_buffer *buffer)
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

/*
 * ring_buffer_set_clock - install a new time-stamp source for @buffer
 * @buffer: the ring buffer to modify
 * @clock:  function returning a u64 time stamp
 *
 * Replaces the per-buffer clock callback that ring_buffer_time_stamp()
 * invokes.  NOTE(review): no locking visible here — presumably callers
 * serialize against concurrent stamping; confirm at the call sites.
 */
void ring_buffer_set_clock(struct ring_buffer *buffer,
u64 (*clock)(void))
{
buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
Expand Down Expand Up @@ -1209,7 +1191,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->tail_page = next_page;

/* reread the time stamp */
*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
cpu_buffer->tail_page->page->time_stamp = *ts;
}

Expand Down Expand Up @@ -1352,7 +1334,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
return NULL;

ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
ts = ring_buffer_time_stamp(cpu_buffer->cpu);

/*
* Only the first commit can update the timestamp.
Expand Down Expand Up @@ -2069,8 +2051,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = cpu_buffer->read_stamp + event->time_delta;
ring_buffer_normalize_time_stamp(buffer,
cpu_buffer->cpu, ts);
ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
}
return event;

Expand Down Expand Up @@ -2131,8 +2112,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = iter->read_stamp + event->time_delta;
ring_buffer_normalize_time_stamp(buffer,
cpu_buffer->cpu, ts);
ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
}
return event;

Expand Down
55 changes: 11 additions & 44 deletions trunk/kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,13 @@ ns2usecs(cycle_t nsec)
return nsec;
}

/*
 * ftrace_now - current trace time for @cpu in normalized clock units
 *
 * Reads the ring buffer time stamp and immediately normalizes it, so
 * the returned value is directly comparable with other normalized
 * stamps.
 */
cycle_t ftrace_now(int cpu)
{
	u64 stamp = ring_buffer_time_stamp(cpu);

	ring_buffer_normalize_time_stamp(cpu, &stamp);
	return stamp;
}

/*
* The global_trace is the descriptor that holds the tracing
* buffers for the live tracing. For each CPU, it contains
Expand All @@ -171,20 +178,6 @@ static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * ftrace_now - current trace time for @cpu in normalized clock units
 *
 * Falls back to the raw local trace clock during early boot, before
 * global_trace has a ring buffer; otherwise reads and normalizes the
 * buffer's time stamp.
 */
cycle_t ftrace_now(int cpu)
{
	u64 stamp;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	stamp = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &stamp);

	return stamp;
}

/*
* The max_tr is used to snapshot the global_trace when a maximum
* latency is reached. Some tracers will use this to store a maximum
Expand Down Expand Up @@ -315,7 +308,6 @@ static const char *trace_options[] = {
"printk-msg-only",
"context-info",
"latency-format",
"global-clock",
NULL
};

Expand Down Expand Up @@ -2252,34 +2244,6 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
return 0;
}

static void set_tracer_flags(unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
return;

if (enabled)
trace_flags |= mask;
else
trace_flags &= ~mask;

if (mask == TRACE_ITER_GLOBAL_CLK) {
u64 (*func)(void);

if (enabled)
func = trace_clock_global;
else
func = trace_clock_local;

mutex_lock(&trace_types_lock);
ring_buffer_set_clock(global_trace.buffer, func);

if (max_tr.buffer)
ring_buffer_set_clock(max_tr.buffer, func);
mutex_unlock(&trace_types_lock);
}
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
Expand Down Expand Up @@ -2307,7 +2271,10 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
int len = strlen(trace_options[i]);

if (strncmp(cmp, trace_options[i], len) == 0) {
set_tracer_flags(1 << i, !neg);
if (neg)
trace_flags &= ~(1 << i);
else
trace_flags |= (1 << i);
break;
}
}
Expand Down
1 change: 0 additions & 1 deletion trunk/kernel/trace/trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -684,7 +684,6 @@ enum trace_iterator_flags {
TRACE_ITER_PRINTK_MSGONLY = 0x10000,
TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
TRACE_ITER_LATENCY_FMT = 0x40000,
TRACE_ITER_GLOBAL_CLK = 0x80000,
};

/*
Expand Down
8 changes: 2 additions & 6 deletions trunk/kernel/trace/trace_power.c
Original file line number Diff line number Diff line change
Expand Up @@ -122,14 +122,10 @@ static int tracing_power_register(void)
/*
 * Tracer ->start callback: enable power-event recording.
 * tracing_power_register() presumably (re)hooks the power tracepoint
 * probes — confirm against its definition above this hunk.
 */
static void start_power_trace(struct trace_array *tr)
{
trace_power_enabled = 1;
tracing_power_register();
}

/*
 * Tracer ->stop callback: disable power-event recording.  Only clears
 * the enable flag; the tracepoint probes stay registered.
 */
static void stop_power_trace(struct trace_array *tr)
{
trace_power_enabled = 0;
}

static void power_trace_reset(struct trace_array *tr)
{
trace_power_enabled = 0;
unregister_trace_power_start(probe_power_start);
Expand Down Expand Up @@ -192,7 +188,7 @@ static struct tracer power_tracer __read_mostly =
.init = power_trace_init,
.start = start_power_trace,
.stop = stop_power_trace,
.reset = power_trace_reset,
.reset = stop_power_trace,
.print_line = power_print_line,
};

Expand Down
9 changes: 4 additions & 5 deletions trunk/kernel/trace/trace_sched_switch.c
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int sched_stopped;

static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
Expand All @@ -29,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
int cpu;
int pc;

if (!sched_ref || sched_stopped)
if (!sched_ref)
return;

tracing_record_cmdline(prev);
Expand Down Expand Up @@ -194,7 +193,6 @@ static void stop_sched_trace(struct trace_array *tr)
/*
 * Tracer ->init callback: remember the trace array, reset its per-CPU
 * buffers, and start recording sched-switch events.  Always succeeds.
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
ctx_trace = tr;
tracing_reset_online_cpus(tr);
tracing_start_sched_switch_record();
return 0;
}
Expand All @@ -207,12 +205,13 @@ static void sched_switch_trace_reset(struct trace_array *tr)

/*
 * Tracer ->start callback: clear the stopped flag, reset the per-CPU
 * buffers, and resume sched-switch tracing.
 */
static void sched_switch_trace_start(struct trace_array *tr)
{
sched_stopped = 0;
tracing_reset_online_cpus(tr);
tracing_start_sched_switch();
}

/*
 * Tracer ->stop callback: mark tracing stopped (checked by the
 * sched-switch probe) and pause sched-switch tracing.
 */
static void sched_switch_trace_stop(struct trace_array *tr)
{
sched_stopped = 1;
tracing_stop_sched_switch();
}

static struct tracer sched_switch_trace __read_mostly =
Expand Down
Loading

0 comments on commit 4dc7575

Please sign in to comment.