ftrace: use raw_spin_lock in tracing
Lock debugging, when enabled, causes huge performance problems for tracing.
Having lock verification happen for every function that is called, because
mcount calls spin_lock, can cripple the system.

This patch converts the spin_locks used by ftrace into raw_spin_locks.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Steven Rostedt authored and Thomas Gleixner committed May 23, 2008
1 parent c5f888c commit 92205c2
Showing 2 changed files with 31 additions and 22 deletions.
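Before the file-by-file diff, here is the conversion pattern in isolation: a minimal before/after sketch written against the 2.6.26-era locking API this patch uses (later kernels renamed these primitives). The struct and function names below are illustrative stand-ins, not kernel symbols.

	/* Before: a lockdep-tracked spinlock.  With lock debugging enabled,
	 * every acquire/release is verified -- fatal when mcount fires on
	 * every traced function call. */
	struct demo_buf_checked {
		spinlock_t lock;
	};

	static void demo_record_checked(struct demo_buf_checked *b)
	{
		unsigned long flags;

		spin_lock_irqsave(&b->lock, flags);
		/* ... write one trace entry ... */
		spin_unlock_irqrestore(&b->lock, flags);
	}

	/* After: a raw spinlock, invisible to lockdep.  __raw_spin_lock()
	 * does not touch the interrupt state itself, so interrupts are
	 * disabled explicitly around it. */
	struct demo_buf_raw {
		raw_spinlock_t lock;
	};

	static void demo_record_raw(struct demo_buf_raw *b)
	{
		unsigned long flags;

		raw_local_irq_save(flags);
		__raw_spin_lock(&b->lock);
		/* ... write one trace entry ... */
		__raw_spin_unlock(&b->lock);
		raw_local_irq_restore(flags);
	}

The trade-off is deliberate: the raw variants forgo lockdep's deadlock verification for these locks, which is exactly what makes them cheap enough to take on every traced call.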
kernel/trace/trace.c (30 additions, 21 deletions)
@@ -133,7 +133,8 @@ static const char *trace_options[] = {
 	NULL
 };
 
-static DEFINE_SPINLOCK(ftrace_max_lock);
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -335,7 +336,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	int i;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	spin_lock(&ftrace_max_lock);
+	__raw_spin_lock(&ftrace_max_lock);
 	/* clear out all the previous traces */
 	for_each_possible_cpu(i) {
 		data = tr->data[i];
@@ -344,7 +345,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	}
 
 	__update_max_tr(tr, tsk, cpu);
-	spin_unlock(&ftrace_max_lock);
+	__raw_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -360,15 +361,15 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	int i;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	spin_lock(&ftrace_max_lock);
+	__raw_spin_lock(&ftrace_max_lock);
 	for_each_possible_cpu(i)
 		tracing_reset(max_tr.data[i]);
 
 	flip_trace(max_tr.data[cpu], data);
 	tracing_reset(data);
 
 	__update_max_tr(tr, tsk, cpu);
-	spin_unlock(&ftrace_max_lock);
+	__raw_spin_unlock(&ftrace_max_lock);
 }
 
 int register_tracer(struct tracer *type)
@@ -652,13 +653,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_FN;
 	entry->fn.ip = ip;
 	entry->fn.parent_ip = parent_ip;
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 }
 
 void
@@ -678,14 +681,16 @@ __trace_special(void *__tr, void *__data,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, 0);
 	entry->type = TRACE_SPECIAL;
 	entry->special.arg1 = arg1;
 	entry->special.arg2 = arg2;
 	entry->special.arg3 = arg3;
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 
 	trace_wake_up();
 }
@@ -725,7 +730,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_CTX;
@@ -736,7 +742,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->ctx.next_prio = next->prio;
 	entry->ctx.next_state = next->state;
 	__trace_stack(tr, data, flags, 4);
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 }
 
 void
@@ -749,7 +756,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_WAKE;
@@ -760,7 +768,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->ctx.next_prio = wakee->prio;
 	entry->ctx.next_state = wakee->state;
 	__trace_stack(tr, data, flags, 5);
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 
 	trace_wake_up();
 }
@@ -1824,7 +1833,8 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	spin_lock_irq(&ftrace_max_lock);
+	raw_local_irq_disable();
+	__raw_spin_lock(&ftrace_max_lock);
 	for_each_possible_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -1839,7 +1849,8 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	spin_unlock_irq(&ftrace_max_lock);
+	__raw_spin_unlock(&ftrace_max_lock);
+	raw_local_irq_enable();
 
 	tracing_cpumask = tracing_cpumask_new;
 
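A detail worth noting in the two hunks above: tracing_cpumask_write() runs in process context with interrupts known to be enabled, and the original code used spin_lock_irq() rather than spin_lock_irqsave(), so the raw conversion pairs the lock with an unconditional raw_local_irq_disable()/raw_local_irq_enable() instead of save/restore. Schematically (same illustrative caveats as the sketch above; demo_max_lock stands in for ftrace_max_lock):

	static raw_spinlock_t demo_max_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

	static void demo_update_cpumask(void)
	{
		/* No flags word: interrupts are known to be enabled here,
		 * mirroring the old spin_lock_irq()/spin_unlock_irq() pair. */
		raw_local_irq_disable();
		__raw_spin_lock(&demo_max_lock);
		/* ... adjust the per-CPU disabled counters ... */
		__raw_spin_unlock(&demo_max_lock);
		raw_local_irq_enable();
	}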
@@ -2299,7 +2310,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
-		spin_lock(&data->lock);
+		__raw_spin_lock(&data->lock);
 	}
 
 	while (find_next_entry_inc(iter) != NULL) {
@@ -2320,7 +2331,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
-		spin_unlock(&data->lock);
+		__raw_spin_unlock(&data->lock);
 	}
 
 	for_each_cpu_mask(cpu, mask) {
@@ -2538,17 +2549,15 @@ static int trace_alloc_page(void)
 	/* Now that we successfully allocate a page per CPU, add them */
 	for_each_possible_cpu(i) {
 		data = global_trace.data[i];
-		spin_lock_init(&data->lock);
-		lockdep_set_class(&data->lock, &data->lock_key);
+		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
 		ClearPageLRU(page);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
-		spin_lock_init(&data->lock);
-		lockdep_set_class(&data->lock, &data->lock_key);
+		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
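The initialization change in trace_alloc_page() above follows from the same switch: a dynamically allocated spinlock_t needs spin_lock_init() plus lockdep_set_class() (so lockdep does not collapse all the per-CPU locks into a single class), while a raw lock carries no lockdep state and can simply be assigned the static unlocked value. Side by side, using the same 2.6.26-era API (data is the per-CPU buffer from the hunk above):

	/* Old: runtime init plus an explicit lockdep class for a dynamic lock. */
	spin_lock_init(&data->lock);
	lockdep_set_class(&data->lock, &data->lock_key);

	/* New: compound-literal assignment of the unlocked initializer;
	 * there is no lockdep state to set up. */
	data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;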
kernel/trace/trace.h (1 addition, 1 deletion)
@@ -76,7 +76,7 @@ struct trace_entry {
 struct trace_array_cpu {
 	struct list_head trace_pages;
 	atomic_t disabled;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct lock_class_key lock_key;
 
 	/* these fields get copied into max-trace: */
