
Commit

---
yaml
---
r: 140795
b: refs/heads/master
c: d7350c3
h: refs/heads/master
i:
  140793: 418d5ba
  140791: f44b2a0
v: v3
Frederic Weisbecker authored and Ingo Molnar committed Feb 25, 2009
1 parent aab530b commit 21f84a6
Showing 3 changed files with 86 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: b04cc6b1f6398b0e0b60d37e27ce51b4899672ec
refs/heads/master: d7350c3f45694104e820041969c8185c5f99e57c
101 changes: 83 additions & 18 deletions trunk/kernel/trace/trace.c
@@ -1294,20 +1294,32 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
return ent;
}

/*
* No locking is necessary here. The worst that can
* happen is losing events consumed at the same time
* by a trace_pipe reader.
* Other than that, we don't risk crashing the ring buffer
* because it serializes the readers.
*
* The current tracer is copied to avoid holding a global
* lock all around.
*/
static void *s_start(struct seq_file *m, loff_t *pos)
{
struct trace_iterator *iter = m->private;
static struct tracer *old_tracer;
int cpu_file = iter->cpu_file;
void *p = NULL;
loff_t l = 0;
int cpu;

/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);

if (!current_trace || current_trace != iter->trace) {
mutex_unlock(&trace_types_lock);
return NULL;
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);

atomic_inc(&trace_record_cmdline_disabled);

@@ -1341,7 +1353,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
static void s_stop(struct seq_file *m, void *p)
{
atomic_dec(&trace_record_cmdline_disabled);
mutex_unlock(&trace_types_lock);
}

static void print_lat_help_header(struct seq_file *m)
@@ -1691,13 +1702,25 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
goto out;
}

/*
* We make a copy of the current tracer to avoid concurrent
* changes to it while we are reading.
*/
mutex_lock(&trace_types_lock);
iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
if (!iter->trace) {
*ret = -ENOMEM;
goto fail;
}
if (current_trace)
*iter->trace = *current_trace;

if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
iter->tr = &global_trace;
iter->trace = current_trace;
iter->pos = -1;
mutex_init(&iter->mutex);
iter->cpu_file = cpu_file;

/* Notify the tracer early; before we stop tracing. */
@@ -1747,8 +1770,9 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
fail:
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
kfree(iter);

return ERR_PTR(-ENOMEM);
@@ -1783,6 +1807,8 @@ static int tracing_release(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);

seq_release(inode, file);
mutex_destroy(&iter->mutex);
kfree(iter->trace);
kfree(iter);
return 0;
}
@@ -2392,18 +2418,29 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
goto out;
}

/*
* We make a copy of the current tracer to avoid concurrent
* changes to it while we are reading.
*/
iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
if (!iter->trace) {
ret = -ENOMEM;
goto fail;
}
if (current_trace)
*iter->trace = *current_trace;

if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
kfree(iter);
ret = -ENOMEM;
goto out;
goto fail;
}

/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);

iter->cpu_file = cpu_file;
iter->tr = &global_trace;
iter->trace = current_trace;
mutex_init(&iter->mutex);
filp->private_data = iter;

if (iter->trace->pipe_open)
@@ -2412,6 +2449,12 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
out:
mutex_unlock(&trace_types_lock);
return ret;

fail:
kfree(iter->trace);
kfree(iter);
mutex_unlock(&trace_types_lock);
return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
@@ -2428,6 +2471,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);

free_cpumask_var(iter->started);
mutex_destroy(&iter->mutex);
kfree(iter->trace);
kfree(iter);

return 0;
@@ -2497,18 +2542,15 @@ static int tracing_wait_pipe(struct file *filp)
return -EAGAIN;
}

mutex_unlock(&trace_types_lock);
mutex_unlock(&iter->mutex);

iter->trace->wait_pipe(iter);

mutex_lock(&trace_types_lock);
mutex_lock(&iter->mutex);

if (signal_pending(current))
return -EINTR;

if (iter->trace != current_trace)
return 0;

/*
* We block until we read something and tracing is disabled.
* We still block if tracing is disabled, but we have never
@@ -2533,6 +2575,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_iterator *iter = filp->private_data;
static struct tracer *old_tracer;
ssize_t sret;

/* return any leftover data */
@@ -2542,7 +2585,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,

trace_seq_reset(&iter->seq);

/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);

/*
* Avoid more than one consumer on a single file descriptor.
* This is just a matter of trace coherency; the ring buffer itself
* is protected.
*/
mutex_lock(&iter->mutex);
if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret)
@@ -2599,7 +2655,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
goto waitagain;

out:
mutex_unlock(&trace_types_lock);
mutex_unlock(&iter->mutex);

return sret;
}
@@ -2676,11 +2732,20 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
};
static struct tracer *old_tracer;
ssize_t ret;
size_t rem;
unsigned int i;

/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);

mutex_lock(&iter->mutex);

if (iter->trace->splice_read) {
ret = iter->trace->splice_read(iter, filp,
@@ -2720,14 +2785,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
trace_seq_reset(&iter->seq);
}

mutex_unlock(&trace_types_lock);
mutex_unlock(&iter->mutex);

spd.nr_pages = i;

return splice_to_pipe(pipe, &spd);

out_err:
mutex_unlock(&trace_types_lock);
mutex_unlock(&iter->mutex);

return ret;
}
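The hunks above all follow one pattern: each reader takes the global trace_types_lock only long enough to snapshot current_trace into its own private copy, then serializes against other users of the same file descriptor with the new per-iterator mutex, so a slow or blocking read never holds the global lock. The standalone userspace sketch below illustrates that pattern with pthreads; names such as struct tracer_ops and reader_refresh_tracer are illustrative stand-ins, not kernel symbols.

/* Userspace illustration only -- not kernel code. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct tracer_ops {
	const char *name;
	void (*read)(void);
};

/* Global "current tracer", changed rarely, guarded by a global lock. */
static struct tracer_ops *current_tracer;
static pthread_mutex_t tracer_types_lock = PTHREAD_MUTEX_INITIALIZER;

struct reader {
	struct tracer_ops trace;   /* private copy of the current tracer */
	struct tracer_ops *old;    /* tracer the copy was last taken from */
	pthread_mutex_t mutex;     /* serializes this reader only */
};

/* Hold the global lock just long enough to refresh the private copy. */
static void reader_refresh_tracer(struct reader *r)
{
	pthread_mutex_lock(&tracer_types_lock);
	if (r->old != current_tracer && current_tracer) {
		r->old = current_tracer;
		r->trace = *current_tracer;
	}
	pthread_mutex_unlock(&tracer_types_lock);
}

static void reader_read(struct reader *r)
{
	reader_refresh_tracer(r);

	/* Only this reader's mutex is held across the (possibly slow) read. */
	pthread_mutex_lock(&r->mutex);
	if (r->trace.read)
		r->trace.read();
	pthread_mutex_unlock(&r->mutex);
}

static void nop_read(void)
{
	puts("reading with the copied tracer");
}

int main(void)
{
	static struct tracer_ops nop = { "nop", nop_read };
	struct reader r = { .old = NULL };

	pthread_mutex_init(&r.mutex, NULL);
	current_tracer = &nop;

	reader_read(&r);        /* snapshots "nop", then reads */

	pthread_mutex_destroy(&r.mutex);
	return 0;
}

The point is only the lock ordering: the global lock covers the cheap snapshot, while the per-reader lock covers the potentially long read, which is what lets s_stop and tracing_read_pipe drop trace_types_lock in the diff above.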
3 changes: 2 additions & 1 deletion trunk/kernel/trace/trace.h
@@ -405,8 +405,9 @@ struct trace_iterator {
struct trace_array *tr;
struct tracer *trace;
void *private;
struct ring_buffer_iter *buffer_iter[NR_CPUS];
int cpu_file;
struct mutex mutex;
struct ring_buffer_iter *buffer_iter[NR_CPUS];

/* The below is zeroed out in pipe_read */
struct trace_seq seq;
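The new struct mutex field in trace_iterator pairs with the allocation and teardown changes in trace.c: __tracing_open and tracing_open_pipe now allocate a private tracer copy and mutex_init the iterator, while the fail label and the release paths free the copy and mutex_destroy it. Below is a minimal userspace sketch of that open/release lifecycle, assuming simplified types (struct trace_iter and iter_open here are illustrative, not the kernel structures or functions).

/* Userspace illustration only -- simplified types, not the kernel API. */
#include <stdlib.h>
#include <pthread.h>

struct tracer_ops {
	const char *name;
};

static struct tracer_ops nop_tracer = { "nop" };
static struct tracer_ops *current_tracer = &nop_tracer;

struct trace_iter {
	struct tracer_ops *trace;  /* private copy, freed on release */
	pthread_mutex_t mutex;     /* per-iterator lock */
};

static struct trace_iter *iter_open(void)
{
	struct trace_iter *iter = calloc(1, sizeof(*iter));

	if (!iter)
		return NULL;

	/* Duplicate the current tracer so later changes cannot race readers. */
	iter->trace = malloc(sizeof(*iter->trace));
	if (!iter->trace)
		goto fail;
	*iter->trace = *current_tracer;

	pthread_mutex_init(&iter->mutex, NULL);
	return iter;

fail:	/* mirrors the patch's error path: free the copy, then the iterator */
	free(iter->trace);
	free(iter);
	return NULL;
}

static void iter_release(struct trace_iter *iter)
{
	pthread_mutex_destroy(&iter->mutex);
	free(iter->trace);
	free(iter);
}

int main(void)
{
	struct trace_iter *iter = iter_open();

	if (iter)
		iter_release(iter);
	return 0;
}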
