Commit

---
yaml
---
r: 146218
b: refs/heads/master
c: 1f8a6a1
h: refs/heads/master
v: v3
Peter Zijlstra authored and Steven Rostedt committed Jun 8, 2009
1 parent 5eb13c9 commit 0cfe24e
Showing 3 changed files with 21 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 918143e8b7d6153d7a83a3f854323407939f4a7e
+refs/heads/master: 1f8a6a10fb9437eac3f516ea4324a19087872f30
14 changes: 13 additions & 1 deletion trunk/include/linux/ring_buffer.h
@@ -105,7 +105,19 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
  * size is in bytes for each per CPU buffer.
  */
 struct ring_buffer *
-ring_buffer_alloc(unsigned long size, unsigned flags);
+__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc(size, flags)                  \
+({                                                      \
+        static struct lock_class_key __key;             \
+        __ring_buffer_alloc((size), (flags), &__key);   \
+})
+
 void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
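As a side note (not part of the patch): ring_buffer_alloc() is now a GNU statement-expression macro, so every call site expands to its own static struct lock_class_key. A minimal sketch of that expansion at two hypothetical call sites follows; the sizes, flags, and names are illustrative only.

#include <linux/ring_buffer.h>

static void example_callers(void)
{
        /* Roughly what ring_buffer_alloc(4096, 0) expands to. Each call
         * site carries its own static __key, so lockdep can tell the two
         * buffers' reader_locks apart. */
        struct ring_buffer *buf_a = ({
                static struct lock_class_key __key;    /* key for site A */
                __ring_buffer_alloc((4096), (0), &__key);
        });
        struct ring_buffer *buf_b = ({
                static struct lock_class_key __key;    /* key for site B */
                __ring_buffer_alloc((4096), (0), &__key);
        });

        ring_buffer_free(buf_a);
        ring_buffer_free(buf_b);
}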
9 changes: 7 additions & 2 deletions trunk/kernel/trace/ring_buffer.c
@@ -426,6 +426,8 @@ struct ring_buffer {
        atomic_t record_disabled;
        cpumask_var_t cpumask;
 
+       struct lock_class_key *reader_lock_key;
+
        struct mutex mutex;
 
        struct ring_buffer_per_cpu **buffers;
@@ -565,6 +567,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
+       lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
        cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&cpu_buffer->pages);
 
@@ -635,7 +638,8 @@ static int rb_cpu_notify(struct notifier_block *self,
  * when the buffer wraps. If this flag is not set, the buffer will
  * drop data when the tail hits the head.
  */
-struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
+struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+                                        struct lock_class_key *key)
 {
        struct ring_buffer *buffer;
        int bsize;
@@ -658,6 +662,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
        buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        buffer->flags = flags;
        buffer->clock = trace_clock_local;
+       buffer->reader_lock_key = key;
 
        /* need at least two pages */
        if (buffer->pages == 1)
@@ -715,7 +720,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
        kfree(buffer);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(ring_buffer_alloc);
+EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
 
 /**
  * ring_buffer_free - free a ring buffer.
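For context, a minimal usage sketch of the new scheme from the point of view of a ring-buffer user other than ftrace (the function and variable names below are made up; RB_FL_OVERWRITE and the size are merely plausible values): the ring_buffer_alloc() call site plants its own lock_class_key, __ring_buffer_alloc() records it in reader_lock_key, and rb_allocate_cpu_buffer() applies it to every per-CPU reader_lock via lockdep_set_class(), so tracing such a user with ftrace no longer collapses the two buffers' reader_locks into one lockdep class.

#include <linux/ring_buffer.h>
#include <linux/errno.h>

/* Hypothetical non-ftrace user of the generic ring buffer. */
static struct ring_buffer *example_buffer;

static int example_init(void)
{
        /* This call site gets its own static lock_class_key through the
         * ring_buffer_alloc() macro added in this commit. */
        example_buffer = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
        if (!example_buffer)
                return -ENOMEM;
        return 0;
}

static void example_exit(void)
{
        ring_buffer_free(example_buffer);
}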
