ring-buffer: Micro-optimize with some strategic inlining
By using inline and noinline, we are able to make the fast path of
recording an event 4% faster.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Steven Rostedt authored and committed on Oct 20, 2010
1 parent 140ff89 commit d9abde2
Showing 1 changed file with 15 additions and 8 deletions.

kernel/trace/ring_buffer.c
@@ -2078,7 +2078,7 @@ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
 	local_inc(&cpu_buffer->commits);
 }
 
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned long commits;
 
@@ -2193,13 +2193,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #define TRACE_RECURSIVE_DEPTH 16
 
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
 {
-	current->trace_recursion++;
-
-	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
-		return 0;
-
 	/* Disable all tracing before we do anything else */
 	tracing_off_permanent();
 
@@ -2211,10 +2207,21 @@ static int trace_recursive_lock(void)
 		   in_nmi());
 
 	WARN_ON_ONCE(1);
+}
+
+static inline int trace_recursive_lock(void)
+{
+	current->trace_recursion++;
+
+	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+		return 0;
+
+	trace_recursive_fail();
 
 	return -1;
 }
 
-static void trace_recursive_unlock(void)
+static inline void trace_recursive_unlock(void)
 {
 	WARN_ON_ONCE(!current->trace_recursion);
 
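The pattern in this commit generalizes: move the cold, rarely-taken branch body into a noinline helper so the compiler keeps the hot path compact, and mark the small hot-path functions inline so their one-increment, one-compare common case folds into the caller. Below is a minimal standalone sketch of that split, assuming GCC-style attributes; the names (recursion_lock, recursion_fail, MAX_DEPTH) and the stderr report are illustrative, not from the kernel source.

#include <stdio.h>

#define MAX_DEPTH 16

/* GCC/Clang attribute; the kernel wraps this as noinline. */
#define noinline __attribute__((noinline))

static int recursion_depth;

/* Cold path: keeping this out of line keeps the reporting and
 * warning code out of the caller's instruction stream. */
static noinline void recursion_fail(void)
{
	fprintf(stderr, "recursion depth exceeded (%d)\n",
		recursion_depth);
}

/* Hot path: after the split it is one increment and one
 * predicted-taken compare, cheap enough to inline everywhere. */
static inline int recursion_lock(void)
{
	recursion_depth++;

	/* __builtin_expect is what the kernel's likely() expands to. */
	if (__builtin_expect(recursion_depth < MAX_DEPTH, 1))
		return 0;

	recursion_fail();
	return -1;
}

static inline void recursion_unlock(void)
{
	recursion_depth--;
}

int main(void)
{
	if (recursion_lock() == 0) {
		/* ... record an event here ... */
		recursion_unlock();
	}
	return 0;
}

In the commit itself, trace_recursive_lock() sits on the event-reserve path, so once its slow path lives in the noinline trace_recursive_fail(), the inlined fast path no longer pulls the printk/WARN machinery into the event-recording code — the "fast path cache" the new comment refers to.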