
Commit

---
yaml
---
r: 191020
b: refs/heads/master
c: cb6e943
h: refs/heads/master
v: v3
Andi Kleen authored and Robert Richter committed Apr 23, 2010
1 parent ef3f1c5 commit 4d1d65e
Showing 2 changed files with 14 additions and 51 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a36bf32e9e8a86f291f746b7f8292e042ee04a46
+refs/heads/master: cb6e943ccf19ab6d3189147e9d625a992e016084
63 changes: 13 additions & 50 deletions trunk/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@

 #define OP_BUFFER_FLAGS 0

-/*
- * Read and write access is using spin locking. Thus, writing to the
- * buffer by NMI handler (x86) could occur also during critical
- * sections when reading the buffer. To avoid this, there are 2
- * buffers for independent read and write access. Read access is in
- * process context only, write access only in the NMI handler. If the
- * read buffer runs empty, both buffers are swapped atomically. There
- * is potentially a small window during swapping where the buffers are
- * disabled and samples could be lost.
- *
- * Using 2 buffers is a little bit overhead, but the solution is clear
- * and does not require changes in the ring buffer implementation. It
- * can be changed to a single buffer solution when the ring buffer
- * access is implemented as non-locking atomic code.
- */
-static struct ring_buffer *op_ring_buffer_read;
-static struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

 static void wq_sync_buffer(struct work_struct *work);
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)

 void free_cpu_buffers(void)
 {
-        if (op_ring_buffer_read)
-                ring_buffer_free(op_ring_buffer_read);
-        op_ring_buffer_read = NULL;
-        if (op_ring_buffer_write)
-                ring_buffer_free(op_ring_buffer_write);
-        op_ring_buffer_write = NULL;
+        if (op_ring_buffer)
+                ring_buffer_free(op_ring_buffer);
+        op_ring_buffer = NULL;
 }

 #define RB_EVENT_HDR_SIZE 4
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
         unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
                                                  RB_EVENT_HDR_SIZE);

-        op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
-        if (!op_ring_buffer_read)
-                goto fail;
-        op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
-        if (!op_ring_buffer_write)
+        op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+        if (!op_ring_buffer)
                 goto fail;

         for_each_possible_cpu(i) {
@@ -162,16 +140,11 @@ struct op_sample
 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
 {
         entry->event = ring_buffer_lock_reserve
-                (op_ring_buffer_write, sizeof(struct op_sample) +
+                (op_ring_buffer, sizeof(struct op_sample) +
                  size * sizeof(entry->sample->data[0]));
-        if (entry->event)
-                entry->sample = ring_buffer_event_data(entry->event);
-        else
-                entry->sample = NULL;
-
-        if (!entry->sample)
+        if (!entry->event)
                 return NULL;
-
+        entry->sample = ring_buffer_event_data(entry->event);
         entry->size = size;
         entry->data = entry->sample->data;

@@ -180,25 +153,16 @@ struct op_sample

 int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-        return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
+        return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
 }

 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 {
         struct ring_buffer_event *e;
-        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-        if (e)
-                goto event;
-        if (ring_buffer_swap_cpu(op_ring_buffer_read,
-                                 op_ring_buffer_write,
-                                 cpu))
+        e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
+        if (!e)
                 return NULL;
-        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-        if (e)
-                goto event;
-        return NULL;

- event:
         entry->event = e;
         entry->sample = ring_buffer_event_data(e);
         entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -209,8 +173,7 @@ struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)

 unsigned long op_cpu_buffer_entries(int cpu)
 {
-        return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-                + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+        return ring_buffer_entries_cpu(op_ring_buffer, cpu);
 }

 static int
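Note: the comment removed at the top of cpu_buffer.c described the old scheme: separate read and write ring buffers, swapped atomically per CPU whenever the read side ran empty, so that the NMI-context writer never raced a locked reader. The comment itself anticipated moving to a single buffer once ring buffer access became lockless, which is what this commit does: the read path collapses to a single consume. Below is a rough before/after sketch of that read path, distilled from the hunks above; the old_read/new_read wrappers are illustrative only and do not exist in the kernel tree, and op_entry bookkeeping and error paths are omitted.

/* Sketch only: simplified from the diff above, not a drop-in replacement. */

/* Old read path (two buffers): consume from the read buffer, swap with
 * the write buffer when it runs empty, then try once more. */
static struct ring_buffer_event *old_read(int cpu)
{
        struct ring_buffer_event *e;

        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
        if (e)
                return e;
        /* Read buffer empty: atomically swap the per-cpu buffers. */
        if (ring_buffer_swap_cpu(op_ring_buffer_read, op_ring_buffer_write, cpu))
                return NULL;
        return ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
}

/* New read path (single buffer): consume directly. */
static struct ring_buffer_event *new_read(int cpu)
{
        return ring_buffer_consume(op_ring_buffer, cpu, NULL);
}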
