ring-buffer: convert to raw spinlocks
Impact: no lockdep debugging of ring buffer

The problem with running lockdep on the ring buffer is that the
ring buffer is the core infrastructure of ftrace: the tracer ends
up tracing the lockdep code while lockdep is testing the ring
buffer's locks. This can cause lockdep to fail, since it is testing
cases that have not fully finished their locking transition.

This patch converts the spin locks used by the ring buffer back
into raw spin locks, which lockdep does not check.
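
The same mechanical conversion is applied at every call site in the
diff below: spin_lock_irqsave()/spin_unlock_irqrestore() is split
into an explicit local_irq_save() followed by __raw_spin_lock()
(and the reverse order on unlock), because the __raw_* primitives
operate on the bare architecture lock and manage neither interrupt
state nor lockdep bookkeeping. A minimal sketch of the pattern,
using a hypothetical struct my_buffer as a stand-in for
struct ring_buffer_per_cpu:

#include <linux/spinlock.h>
#include <linux/irqflags.h>

/* Hypothetical stand-in for struct ring_buffer_per_cpu. */
struct my_buffer {
	raw_spinlock_t lock;	/* was: spinlock_t lock; */
};

static void my_buffer_init(struct my_buffer *b)
{
	/* was: spin_lock_init(&b->lock);
	 * a raw spinlock is set up by assigning the static
	 * initializer directly. */
	b->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
}

static void my_buffer_op(struct my_buffer *b)
{
	unsigned long flags;

	/* was: spin_lock_irqsave(&b->lock, flags);
	 * __raw_spin_lock() does not disable interrupts itself,
	 * so save and disable them explicitly first. */
	local_irq_save(flags);
	__raw_spin_lock(&b->lock);

	/* ... critical section ... */

	/* Unlock in reverse order: release the lock, then
	 * restore the saved interrupt state. */
	__raw_spin_unlock(&b->lock);
	local_irq_restore(flags);
}

Because lockdep never sees the __raw_* operations, the tracer can
instrument lockdep itself without recursing into the ring buffer's
own locking; the trade-off is the lost lockdep coverage noted in
the Impact line above.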

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Steven Rostedt authored and Ingo Molnar committed Nov 6, 2008
commit 3e03fb7 (parent 9036990)
Showing 1 changed file with 20 additions and 11 deletions.

kernel/trace/ring_buffer.c
@@ -154,7 +154,7 @@ static inline int test_time_stamp(u64 delta)
 struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -291,7 +291,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->lock);
+	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
 	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
@@ -854,7 +854,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (write > BUF_PAGE_SIZE) {
 		struct buffer_page *next_page = tail_page;
 
-		spin_lock_irqsave(&cpu_buffer->lock, flags);
+		local_irq_save(flags);
+		__raw_spin_lock(&cpu_buffer->lock);
 
 		rb_inc_page(cpu_buffer, &next_page);
 
@@ -930,7 +931,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 			rb_set_commit_to_write(cpu_buffer);
 		}
 
-		spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+		__raw_spin_unlock(&cpu_buffer->lock);
+		local_irq_restore(flags);
 
 		/* fail and let the caller try again */
 		return ERR_PTR(-EAGAIN);
@@ -953,7 +955,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 	return NULL;
 }
 
@@ -1524,7 +1527,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	struct buffer_page *reader = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cpu_buffer->lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&cpu_buffer->lock);
 
  again:
 	reader = cpu_buffer->reader_page;
@@ -1574,7 +1578,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 
 	return reader;
 }
@@ -1815,9 +1820,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
-	spin_lock_irqsave(&cpu_buffer->lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&cpu_buffer->lock);
 	ring_buffer_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 
 	return iter;
 }
@@ -1903,11 +1910,13 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return;
 
-	spin_lock_irqsave(&cpu_buffer->lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+	__raw_spin_unlock(&cpu_buffer->lock);
+	local_irq_restore(flags);
 }
 
 /**