From 275794c2c57add9ad05932c72f9319b325fe36a4 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Mon, 8 Mar 2010 14:50:43 +0800
Subject: [PATCH]

--- yaml ---
r: 187869
b: refs/heads/master
c: 52fbe9cde7fdb5c6fac196d7ebd2d92d05ef3cd4
h: refs/heads/master
i:
  187867: c485543d03b959ccbfaa7dbbabea71a927aa87ff
v: v3
---
 [refs]                           |  2 +-
 trunk/kernel/trace/ring_buffer.c | 12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/[refs] b/[refs]
index 10aa4a8d345a..24de976baa9a 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 915a0b575fdb2376135ed9334b3ccb1eb51db622
+refs/heads/master: 52fbe9cde7fdb5c6fac196d7ebd2d92d05ef3cd4
diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c
index 8c1b2d290718..54191d6ed195 100644
--- a/trunk/kernel/trace/ring_buffer.c
+++ b/trunk/kernel/trace/ring_buffer.c
@@ -2232,12 +2232,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return NULL;
 
-	if (atomic_read(&buffer->record_disabled))
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out_nocheck;
+
 	if (trace_recursive_lock())
 		goto out_nocheck;
 
@@ -2469,11 +2469,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (ring_buffer_flags != RB_BUFFERS_ON)
 		return -EBUSY;
 
-	if (atomic_read(&buffer->record_disabled))
-		return -EBUSY;
-
 	resched = ftrace_preempt_disable();
 
+	if (atomic_read(&buffer->record_disabled))
+		goto out;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))