From ed079f28d6108d2d69b0c9ee1712f94734f2f2c8 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Wed, 18 Feb 2009 18:33:57 -0500
Subject: [PATCH]

--- yaml ---
r: 131542
b: refs/heads/master
c: 0c5119c1e655e0719a69601b1049acdd5ec1c125
h: refs/heads/master
v: v3
---
 [refs]                              | 2 +-
 trunk/kernel/trace/trace_selftest.c | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 972074542dd8..9206144dcf53 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5b058bcde961bf28678a70e44c079107313543b6
+refs/heads/master: 0c5119c1e655e0719a69601b1049acdd5ec1c125
diff --git a/trunk/kernel/trace/trace_selftest.c b/trunk/kernel/trace/trace_selftest.c
index 88c8eb70f54a..a7e0ef662f9f 100644
--- a/trunk/kernel/trace/trace_selftest.c
+++ b/trunk/kernel/trace/trace_selftest.c
@@ -57,11 +57,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
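
The comment this patch adds states the general rule it applies: stop the producer before draining a shared buffer, because a broken, always-filling producer would otherwise turn the drain loop into a hard lockup. Below is a minimal user-space C sketch of that tracing_off()/tracing_on() bracketing pattern. It is an illustration under stated assumptions, not kernel code: producer_off(), producer_on(), produce(), and drain_buffer() are hypothetical stand-ins for the kernel's tracing_off(), tracing_on(), ring-buffer writes, and trace_test_buffer_cpu(), and the kernel's locking (ftrace_max_lock, IRQs off) is omitted.

/*
 * Sketch: disable a producer while a consumer drains the buffer, so the
 * drain loop is guaranteed to terminate. All names here are hypothetical
 * illustrations, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

#define BUF_SIZE 8

static int buf[BUF_SIZE];
static int head, tail;                  /* producer writes at head, consumer reads at tail */
static bool producer_enabled = true;

/* Analogue of tracing_off(): new entries are dropped while disabled. */
static void producer_off(void) { producer_enabled = false; }

/* Analogue of tracing_on(): re-enable the producer afterwards. */
static void producer_on(void)  { producer_enabled = true; }

/* Analogue of a ring-buffer write: refuses to add entries when disabled. */
static bool produce(int value)
{
	if (!producer_enabled)
		return false;
	buf[head % BUF_SIZE] = value;
	head++;
	return true;
}

/*
 * Analogue of the trace_test_buffer_cpu() loop: consume until empty.
 * If the producer were still running and broken (always filling),
 * this loop could spin forever.
 */
static void drain_buffer(void)
{
	while (head != tail) {
		printf("consumed %d\n", buf[tail % BUF_SIZE]);
		tail++;
	}
}

int main(void)
{
	/* Pretend a tracer queued a few entries. */
	for (int i = 0; i < 5; i++)
		produce(i);

	producer_off();                 /* like tracing_off() before the drain */
	drain_buffer();                 /* guaranteed to terminate now */
	producer_on();                  /* like tracing_on() once the test is done */

	return 0;
}

The design point matches the patch: rather than bounding the drain loop with a count or timeout, the producer is switched off for the duration of the test, which is cheap and makes termination unconditional; in the kernel case the buffer is already protected by ftrace_max_lock with interrupts disabled, so only the runaway-writer case needed handling.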