Skip to content

Commit

Permalink
Merge tag 'trace-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Browse files Browse the repository at this point in the history

Pull tracing fixes from Steven Rostedt:
 "Various fix-ups:

   - comment fixes

   - build fix

   - better memory allocation (don't use NR_CPUS)

   - configuration fix

   - build warning fix

   - enhanced callback parameter (to simplify users of trace hooks)

   - give up on stack tracing when RCU isn't watching (it's a lost
     cause)"

* tag 'trace-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Have stack trace not record if RCU is not watching
  tracing: Pass export pointer as argument to ->write()
  ring-buffer: Remove unused function __rb_data_page_index()
  tracing: make PREEMPTIRQ_EVENTS depend on TRACING
  tracing: Allocate mask_str buffer dynamically
  tracing: always define trace_{irq,preempt}_{enable_disable}
  tracing: Fix code comments in trace.c
  • Loading branch information
Linus Torvalds committed Dec 15, 2017
2 parents c4f988e + b00d607 commit 0424378
Show file tree
Hide file tree
Showing 7 changed files with 32 additions and 39 deletions.
6 changes: 4 additions & 2 deletions drivers/hwtracing/stm/ftrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,11 @@ static struct stm_ftrace {
* @len: length of the data packet
*/
static void notrace
stm_ftrace_write(const void *buf, unsigned int len)
stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
{
stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);

stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
}

static int stm_ftrace_link(struct stm_source_data *data)
Expand Down
2 changes: 1 addition & 1 deletion include/linux/trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
*/
struct trace_export {
struct trace_export __rcu *next;
void (*write)(const void *, unsigned int);
void (*write)(struct trace_export *, const void *, unsigned int);
};

int register_ftrace_export(struct trace_export *export);
Expand Down
11 changes: 7 additions & 4 deletions include/trace/events/preemptirq.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,

#include <trace/define_trace.h>

#else /* !CONFIG_PREEMPTIRQ_EVENTS */
#endif /* !CONFIG_PREEMPTIRQ_EVENTS */

#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
#define trace_irq_enable(...)
#define trace_irq_disable(...)
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
#define trace_irq_enable_rcuidle(...)
#define trace_irq_disable_rcuidle(...)
#endif

#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
#define trace_preempt_enable_rcuidle(...)
#define trace_preempt_disable_rcuidle(...)

#endif
1 change: 1 addition & 0 deletions kernel/trace/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS
bool "Enable trace events for preempt and irq disable/enable"
select TRACE_IRQFLAGS
depends on DEBUG_PREEMPT || !PROVE_LOCKING
depends on TRACING
default n
help
Enable tracing of disable and enable events for preemption and irqs.
Expand Down
6 changes: 0 additions & 6 deletions kernel/trace/ring_buffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -1799,12 +1799,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);

static __always_inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
return bpage->data + index;
}

static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
return bpage->page->data + index;
Expand Down
41 changes: 15 additions & 26 deletions kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
}

/**
* trace_pid_filter_add_remove - Add or remove a task from a pid_list
* trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
* @pid_list: The list to modify
* @self: The current task for fork or NULL for exit
* @task: The task to add or remove
Expand Down Expand Up @@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
}

/**
* trace_snapshot - take a snapshot of the current buffer.
* tracing_snapshot - take a snapshot of the current buffer.
*
* This causes a swap between the snapshot buffer and the current live
* tracing buffer. You can use this to take snapshots of the live
Expand Down Expand Up @@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
* trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
* tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
*
* This is similar to trace_snapshot(), but it will allocate the
* This is similar to tracing_snapshot(), but it will allocate the
* snapshot buffer if it isn't already allocated. Use this only
* where it is safe to sleep, as the allocation may sleep.
*
Expand Down Expand Up @@ -1303,7 +1303,7 @@ unsigned long __read_mostly tracing_thresh;
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
* for later retrieval via /sys/kernel/debug/tracing/latency_trace)
* for later retrieval via /sys/kernel/tracing/tracing_max_latency)
*/
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
Expand Down Expand Up @@ -2415,7 +2415,7 @@ trace_process_export(struct trace_export *export,

entry = ring_buffer_event_data(event);
size = ring_buffer_event_length(event);
export->write(entry, size);
export->write(export, entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);
Expand Down Expand Up @@ -4178,37 +4178,30 @@ static const struct file_operations show_traces_fops = {
.llseek = seq_lseek,
};

/*
* The tracer itself will not take this lock, but still we want
* to provide a consistent cpumask to user-space:
*/
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
* Temporary storage for the character representation of the
* CPU bitmask (and one more byte for the newline):
*/
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct trace_array *tr = file_inode(filp)->i_private;
char *mask_str;
int len;

mutex_lock(&tracing_cpumask_update_lock);
len = snprintf(NULL, 0, "%*pb\n",
cpumask_pr_args(tr->tracing_cpumask)) + 1;
mask_str = kmalloc(len, GFP_KERNEL);
if (!mask_str)
return -ENOMEM;

len = snprintf(mask_str, count, "%*pb\n",
len = snprintf(mask_str, len, "%*pb\n",
cpumask_pr_args(tr->tracing_cpumask));
if (len >= count) {
count = -EINVAL;
goto out_err;
}
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
mutex_unlock(&tracing_cpumask_update_lock);
kfree(mask_str);

return count;
}
Expand All @@ -4228,8 +4221,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
if (err)
goto err_unlock;

mutex_lock(&tracing_cpumask_update_lock);

local_irq_disable();
arch_spin_lock(&tr->max_lock);
for_each_tracing_cpu(cpu) {
Expand All @@ -4252,8 +4243,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
local_irq_enable();

cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

mutex_unlock(&tracing_cpumask_update_lock);
free_cpumask_var(tracing_cpumask_new);

return count;
Expand Down
4 changes: 4 additions & 0 deletions kernel/trace/trace_stack.c
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
if (__this_cpu_read(disable_stack_tracer) != 1)
goto out;

/* If rcu is not watching, then save stack trace can fail */
if (!rcu_is_watching())
goto out;

ip += MCOUNT_INSN_SIZE;

check_stack(ip, &stack);
Expand Down

0 comments on commit 0424378

Please sign in to comment.