Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 141026
b: refs/heads/master
c: b8b9426
h: refs/heads/master
v: v3
  • Loading branch information
Dmitri Vorobiev authored and Ingo Molnar committed Mar 22, 2009
1 parent fbc3f34 commit 432b4bc
Show file tree
Hide file tree
Showing 14 changed files with 69 additions and 898 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 9a8118baaeb0eaa148913bed77bf9c6335f6ca63
refs/heads/master: b8b94265337f83b7db9c5f429b1769d463d7da8c
11 changes: 3 additions & 8 deletions trunk/include/linux/ring_buffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,10 @@ struct ring_buffer_event {
/**
* enum ring_buffer_type - internal ring buffer types
*
* @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
* If time_delta is 0:
* array is ignored
* size is variable depending on how much
* @RINGBUF_TYPE_PADDING: Left over page padding
* array is ignored
* size is variable depending on how much
* padding is needed
* If time_delta is non zero:
* everything else same as RINGBUF_TYPE_DATA
*
* @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
* array[0] = time delta (28 .. 59)
Expand Down Expand Up @@ -68,8 +65,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
return event->time_delta;
}

void ring_buffer_event_discard(struct ring_buffer_event *event);

/*
* size is in bytes for each per CPU buffer.
*/
Expand Down
14 changes: 12 additions & 2 deletions trunk/kernel/extable.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,21 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <asm/uaccess.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

/*
* mutex protecting text section modification (dynamic code patching).
* some users need to sleep (allocating memory...) while they hold this lock.
*
* NOT exported to modules - patching kernel text is a really delicate matter.
*/
DEFINE_MUTEX(text_mutex);

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];
Expand Down
1 change: 0 additions & 1 deletion trunk/kernel/trace/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,5 @@ obj-$(CONFIG_EVENT_TRACER) += events.o
obj-$(CONFIG_EVENT_TRACER) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
obj-$(CONFIG_EVENT_TRACER) += trace_events_filter.o

libftrace-y := ftrace.o
125 changes: 24 additions & 101 deletions trunk/kernel/trace/ring_buffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -189,65 +189,16 @@ enum {
RB_LEN_TIME_STAMP = 16,
};

/*
 * A "null" event is padding whose time_delta was left at zero:
 * end-of-page filler rather than a discarded event.
 */
static inline int rb_null_event(struct ring_buffer_event *event)
{
	if (event->type != RINGBUF_TYPE_PADDING)
		return 0;

	return !event->time_delta;
}

/*
 * A discarded event is PADDING with a non-zero time_delta, which
 * distinguishes it from plain end-of-page padding.
 */
static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	if (event->type != RINGBUF_TYPE_PADDING)
		return 0;

	return event->time_delta != 0;
}

/* Turn @event into null padding: PADDING type with a zero time_delta. */
static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->time_delta = 0;
	event->type = RINGBUF_TYPE_PADDING;
}

/**
 * ring_buffer_event_discard - discard an event in the ring buffer
 * @event: the event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * Note, it is up to the user to be careful with this, and protect
 * against races. If the user discards an event that has been consumed
 * it is possible that it could corrupt the ring buffer.
 */
void ring_buffer_event_discard(struct ring_buffer_event *event)
{
	event->type = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero, so readers can tell a discarded
	 * event apart from null end-of-page padding */
	if (!event->time_delta)
		event->time_delta = 1;
}

/*
 * Total length in bytes of a data-carrying event, header included.
 * Small payloads encode their size in event->len (in RB_ALIGNMENT
 * units); larger ones store the byte count in array[0].
 */
static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned payload;

	payload = event->len ? event->len * RB_ALIGNMENT : event->array[0];

	return payload + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
unsigned length;

switch (event->type) {
case RINGBUF_TYPE_PADDING:
if (rb_null_event(event))
/* undefined */
return -1;
return rb_event_data_length(event);
/* undefined */
return -1;

case RINGBUF_TYPE_TIME_EXTEND:
return RB_LEN_TIME_EXTEND;
Expand All @@ -256,7 +207,11 @@ rb_event_length(struct ring_buffer_event *event)
return RB_LEN_TIME_STAMP;

case RINGBUF_TYPE_DATA:
return rb_event_data_length(event);
if (event->len)
length = event->len * RB_ALIGNMENT;
else
length = event->array[0];
return length + RB_EVNT_HDR_SIZE;
default:
BUG();
}
Expand Down Expand Up @@ -580,8 +535,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
static int rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
#endif

/**
Expand Down Expand Up @@ -890,6 +845,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

/* A null event is end-of-page padding. */
static inline int rb_null_event(struct ring_buffer_event *event)
{
	if (event->type == RINGBUF_TYPE_PADDING)
		return 1;

	return 0;
}

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
Expand Down Expand Up @@ -1259,7 +1219,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
if (tail < BUF_PAGE_SIZE) {
/* Mark the rest of the page with padding */
event = __rb_page_index(tail_page, tail);
rb_event_set_padding(event);
event->type = RINGBUF_TYPE_PADDING;
}

if (tail <= BUF_PAGE_SIZE)
Expand Down Expand Up @@ -2009,7 +1969,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)

event = rb_reader_event(cpu_buffer);

if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
if (event->type == RINGBUF_TYPE_DATA)
cpu_buffer->entries--;

rb_update_read_stamp(cpu_buffer, event);
Expand Down Expand Up @@ -2092,18 +2052,9 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)

switch (event->type) {
case RINGBUF_TYPE_PADDING:
if (rb_null_event(event))
RB_WARN_ON(cpu_buffer, 1);
/*
* Because the writer could be discarding every
* event it creates (which would probably be bad)
* if we were to go back to "again" then we may never
* catch up, and will trigger the warn on, or lock
* the box. Return the padding, and we will release
* the current locks, and try again.
*/
RB_WARN_ON(cpu_buffer, 1);
rb_advance_reader(cpu_buffer);
return event;
return NULL;

case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
Expand Down Expand Up @@ -2164,12 +2115,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)

switch (event->type) {
case RINGBUF_TYPE_PADDING:
if (rb_null_event(event)) {
rb_inc_iter(iter);
goto again;
}
rb_advance_iter(iter);
return event;
rb_inc_iter(iter);
goto again;

case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
Expand Down Expand Up @@ -2216,16 +2163,10 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;

again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_buffer_peek(buffer, cpu, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

if (event && event->type == RINGBUF_TYPE_PADDING) {
cpu_relax();
goto again;
}

return event;
}

Expand All @@ -2244,16 +2185,10 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *event;
unsigned long flags;

again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

if (event && event->type == RINGBUF_TYPE_PADDING) {
cpu_relax();
goto again;
}

return event;
}

Expand All @@ -2272,7 +2207,6 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
struct ring_buffer_event *event = NULL;
unsigned long flags;

again:
/* might be called in atomic */
preempt_disable();

Expand All @@ -2294,11 +2228,6 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
out:
preempt_enable();

if (event && event->type == RINGBUF_TYPE_PADDING) {
cpu_relax();
goto again;
}

return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
Expand Down Expand Up @@ -2377,7 +2306,6 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;

again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
if (!event)
Expand All @@ -2387,11 +2315,6 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

if (event && event->type == RINGBUF_TYPE_PADDING) {
cpu_relax();
goto again;
}

return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
Expand Down Expand Up @@ -2861,8 +2784,8 @@ static __init int rb_init_debugfs(void)
fs_initcall(rb_init_debugfs);

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static int rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct ring_buffer *buffer =
container_of(self, struct ring_buffer, cpu_notify);
Expand Down
Loading

0 comments on commit 432b4bc

Please sign in to comment.