Commit 6fdc63f

---
r: 191025
b: refs/heads/master
c: 66a8cb9
h: refs/heads/master
i:
  191023: c80c2f1
v: v3
Steven Rostedt authored and committed Apr 1, 2010
1 parent 7baa4d9 commit 6fdc63f
Showing 8 changed files with 80 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: eb0c53771fb2f5f66b0edb3ebce33be4bbf1c285
+refs/heads/master: 66a8cb95ed04025664d1db4e952155ee1dccd048
4 changes: 2 additions & 2 deletions trunk/drivers/oprofile/cpu_buffer.c
@@ -186,14 +186,14 @@ int op_cpu_buffer_write_commit(struct op_entry *entry)
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
struct ring_buffer_event *e;
-e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL, NULL);
if (e)
goto event;
if (ring_buffer_swap_cpu(op_ring_buffer_read,
op_ring_buffer_write,
cpu))
return NULL;
-e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL, NULL);
if (e)
goto event;
return NULL;
6 changes: 4 additions & 2 deletions trunk/include/linux/ring_buffer.h
@@ -120,9 +120,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length, void *data);

struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events);
struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events);

struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
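Both reader entry points now take an extra lost_events pointer; passing NULL preserves the old behaviour, which is what the call sites updated elsewhere in this commit do. As a rough usage sketch (a hypothetical caller, not part of this commit), a consumer that wants the drop count could look like this:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

/*
 * Illustrative only: drain one CPU's buffer and report how many events
 * were dropped since the previous read.  Assumes "buffer" came from
 * ring_buffer_alloc() and "cpu" is a valid, online CPU.
 */
static void drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			printk(KERN_INFO "cpu %d: lost %lu events\n",
			       cpu, lost);
		/* handle ring_buffer_event_data(event) here */
	}
}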
72 changes: 66 additions & 6 deletions trunk/kernel/trace/ring_buffer.c
@@ -318,6 +318,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST (~TS_MASK)

+/* Flag when events were overwritten */
+#define RB_MISSED_EVENTS (1 << 31)

struct buffer_data_page {
u64 time_stamp; /* page time stamp */
local_t commit; /* write committed index */
@@ -416,6 +419,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
(unsigned int)sizeof(field.commit),
(unsigned int)is_signed_type(long));

+ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
+"offset:%u;\tsize:%u;\tsigned:%u;\n",
+(unsigned int)offsetof(typeof(field), commit),
+1,
+(unsigned int)is_signed_type(long));

ret = trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
@@ -439,6 +448,8 @@ struct ring_buffer_per_cpu {
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
+unsigned long lost_events;
+unsigned long last_overrun;
local_t commit_overrun;
local_t overrun;
local_t entries;
@@ -2835,6 +2846,7 @@ static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = NULL;
+unsigned long overwrite;
unsigned long flags;
int nr_loops = 0;
int ret;
@@ -2895,6 +2907,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/* The reader page will be pointing to the new head */
rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

+/*
+* We want to make sure we read the overruns after we set up our
+* pointers to the next object. The writer side does a
+* cmpxchg to cross pages which acts as the mb on the writer
+* side. Note, the reader will constantly fail the swap
+* while the writer is updating the pointers, so this
+* guarantees that the overwrite recorded here is the one we
+* want to compare with the last_overrun.
+*/
+smp_mb();
+overwrite = local_read(&(cpu_buffer->overrun));

/*
* Here's the tricky part.
*
@@ -2926,6 +2950,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page = reader;
rb_reset_reader_page(cpu_buffer);

+if (overwrite != cpu_buffer->last_overrun) {
+cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+cpu_buffer->last_overrun = overwrite;
+}

goto again;

out:
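The two hunks above carry the core of the bookkeeping: the writer bumps cpu_buffer->overrun and crosses pages with a cmpxchg, so the reader only needs an smp_mb() before sampling the counter, and the difference against last_overrun is then the number of events dropped since the previous reader-page swap. A stand-alone sketch of that reader-side pattern (illustrative names, assuming the same counter semantics; not a kernel helper):

#include <linux/kernel.h>
#include <asm/local.h>

struct reader_state {
	unsigned long last_overrun;	/* counter value at the last page swap */
	unsigned long lost_events;	/* drops seen since that swap */
};

/*
 * Illustrative only: call once the new reader page is set up, with
 * "overrun" pointing at the writer-maintained overwrite counter.
 */
static void note_dropped_events(struct reader_state *rs, local_t *overrun)
{
	unsigned long now;

	/*
	 * Sample the counter only after the reader-page pointers are in
	 * place; the writer's page-crossing cmpxchg is the matching
	 * barrier on the other side.
	 */
	smp_mb();
	now = local_read(overrun);

	if (now != rs->last_overrun) {
		rs->lost_events = now - rs->last_overrun;
		rs->last_overrun = now;
	}
}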
@@ -3002,8 +3031,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
rb_advance_iter(iter);
}

+static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+return cpu_buffer->lost_events;
+}

static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
+unsigned long *lost_events)
{
struct ring_buffer_event *event;
struct buffer_page *reader;
@@ -3055,6 +3090,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
+if (lost_events)
+*lost_events = rb_lost_events(cpu_buffer);
return event;

default:
@@ -3165,12 +3202,14 @@ static inline int rb_ok_to_lock(void)
* @buffer: The ring buffer to read
* @cpu: The cpu to peak at
* @ts: The timestamp counter of this event.
+* @lost_events: a variable to store if events were lost (may be NULL)
*
* This will return the event that will be read next, but does
* not consume the data.
*/
struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
@@ -3185,7 +3224,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
local_irq_save(flags);
if (dolock)
spin_lock(&cpu_buffer->reader_lock);
-event = rb_buffer_peek(cpu_buffer, ts);
+event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
if (dolock)
@@ -3227,13 +3266,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
/**
* ring_buffer_consume - return an event and consume it
* @buffer: The ring buffer to get the next event from
+* @cpu: the cpu to read the buffer from
+* @ts: a variable to store the timestamp (may be NULL)
+* @lost_events: a variable to store if events were lost (may be NULL)
*
* Returns the next event in the ring buffer, and that event is consumed.
* Meaning, that sequential reads will keep returning a different event,
* and eventually empty the ring buffer if the producer is slower.
*/
struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+unsigned long *lost_events)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
@@ -3254,9 +3297,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
if (dolock)
spin_lock(&cpu_buffer->reader_lock);

-event = rb_buffer_peek(cpu_buffer, ts);
-if (event)
+event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+if (event) {
+cpu_buffer->lost_events = 0;
rb_advance_reader(cpu_buffer);
+}

if (dolock)
spin_unlock(&cpu_buffer->reader_lock);
@@ -3405,6 +3450,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0;

+cpu_buffer->lost_events = 0;
+cpu_buffer->last_overrun = 0;

rb_head_page_activate(cpu_buffer);
}

@@ -3684,6 +3732,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
unsigned int commit;
unsigned int read;
u64 save_timestamp;
+int missed_events = 0;
int ret = -1;

if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -3716,6 +3765,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
read = reader->read;
commit = rb_page_commit(reader);

+/* Check if any events were dropped */
+if (cpu_buffer->lost_events)
+missed_events = 1;

/*
* If this page has been partially read or
* if len is not big enough to read the rest of the page or
@@ -3779,6 +3832,13 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
}
ret = read;

+cpu_buffer->lost_events = 0;
+/*
+ * Set a flag in the commit field if we lost events
+ */
+if (missed_events)
+local_add(RB_MISSED_EVENTS, &bpage->commit);

out_unlock:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

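On the page-copy path, ring_buffer_read_page() now only records that something was lost: if any events were dropped before the page being handed out, bit 31 (RB_MISSED_EVENTS) is set in that page's commit field; only a flag, not a count, is stored on this path. A hedged sketch of how a consumer of such pages might test the flag (the mask mirrors the definition added above, the helper names are made up, and struct buffer_data_page itself stays private to ring_buffer.c):

/*
 * Mirrors the flag added above.  The patch defines it as (1 << 31);
 * 1UL is used here so the complement below stays well defined.
 */
#define RB_MISSED_EVENTS	(1UL << 31)

/*
 * Illustrative only: "commit" is the commit word read out of a page
 * filled by ring_buffer_read_page().
 */
static inline int page_missed_events(unsigned long commit)
{
	return (commit & RB_MISSED_EVENTS) != 0;
}

static inline unsigned long page_commit_size(unsigned long commit)
{
	return commit & ~RB_MISSED_EVENTS;
}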
2 changes: 1 addition & 1 deletion trunk/kernel/trace/ring_buffer_benchmark.c
@@ -81,7 +81,7 @@ static enum event_status read_event(int cpu)
int *entry;
u64 ts;

-event = ring_buffer_consume(buffer, cpu, &ts);
+event = ring_buffer_consume(buffer, cpu, &ts, NULL);
if (!event)
return EVENT_DROPPED;

4 changes: 2 additions & 2 deletions trunk/kernel/trace/trace.c
@@ -1556,7 +1556,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts);
else
-event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+event = ring_buffer_peek(iter->tr->buffer, cpu, ts, NULL);

ftrace_enable_cpu();

@@ -1635,7 +1635,7 @@ static void trace_consume(struct trace_iterator *iter)
{
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
-ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, NULL);
ftrace_enable_cpu();
}

5 changes: 3 additions & 2 deletions trunk/kernel/trace/trace_functions_graph.c
@@ -489,9 +489,10 @@ get_return_for_leaf(struct trace_iterator *iter,
* We need to consume the current entry to see
* the next one.
*/
-ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+ring_buffer_consume(iter->tr->buffer, iter->cpu,
+NULL, NULL);
event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-NULL);
+NULL, NULL);
}

if (!event)
2 changes: 1 addition & 1 deletion trunk/kernel/trace/trace_selftest.c
@@ -29,7 +29,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
struct trace_entry *entry;
unsigned int loops = 0;

-while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
entry = ring_buffer_event_data(event);

/*
