percpu: make percpu symbols in tracer unique
This patch updates percpu-related symbols in the kernel tracer so that
percpu symbols are unique and don't clash with local symbols.  This
serves two purposes: it decreases the possibility of global percpu
symbol collisions, and it allows the per_cpu__ prefix to be dropped
from percpu symbols.

* kernel/trace/trace.c: s/max_data/max_tr_data/
* kernel/trace/trace_hw_branches.c: s/tracer/hwb_tracer/, s/buffer/hwb_buffer/

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.
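
To illustrate the clash being avoided (this example is not part of the
patch): the per_cpu() accessor of this era token-pasted a per_cpu__
prefix onto the symbol name, which kept percpu symbols in a namespace
of their own. Once that prefix is dropped, the accessor names the
symbol directly, so an ordinary local variable can silently shadow the
percpu one. Below is a minimal user-space sketch of that failure mode;
DEFINE_PER_CPU_OLD/NEW and per_cpu_old/new are simplified stand-ins,
not the real kernel macros (which also apply a per-CPU offset):

#include <stdio.h>

/* Old scheme: the accessor token-pastes a per_cpu__ prefix, so percpu
 * symbols live in their own namespace and locals can never shadow them. */
#define DEFINE_PER_CPU_OLD(type, name) type per_cpu__##name
#define per_cpu_old(name, cpu)         (per_cpu__##name)  /* cpu ignored */

/* New scheme: no prefix, the accessor references the symbol directly. */
#define DEFINE_PER_CPU_NEW(type, name) type name
#define per_cpu_new(name, cpu)         (name)             /* cpu ignored */

DEFINE_PER_CPU_OLD(int, tracer);        /* defines per_cpu__tracer */
DEFINE_PER_CPU_NEW(int, tracer);        /* defines a plain "tracer" */

int main(void)
{
        int tracer = 42;                /* local also named "tracer" */

        /* Expands to (per_cpu__tracer): unaffected by the local. */
        printf("old scheme: %d\n", per_cpu_old(tracer, 0));

        /* Expands to (tracer) and silently reads the *local* instead of
         * the percpu symbol: the class of clash that renaming to
         * hwb_tracer avoids. */
        printf("new scheme: %d\n", per_cpu_new(tracer, 0));
        return 0;
}

Renaming the tracer's variables to hwb_tracer, hwb_buffer and
max_tr_data gives them names no local is likely to reuse, so the
prefix-less scheme has nothing left to collide with.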

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Tejun Heo committed Oct 29, 2009
1 parent 1871e52 commit 9705f69
Showing 2 changed files with 28 additions and 27 deletions.
kernel/trace/trace.c: 4 changes (2 additions & 2 deletions)

@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -4426,7 +4426,7 @@ __init static int tracer_alloc_buffers(void)
         /* Allocate the first page for all buffers */
         for_each_tracing_cpu(i) {
                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-                max_tr.data[i] = &per_cpu(max_data, i);
+                max_tr.data[i] = &per_cpu(max_tr_data, i);
         }
 
         trace_init_cmdlines();

kernel/trace/trace_hw_branches.c: 51 changes (26 additions & 25 deletions)

@@ -20,10 +20,10 @@
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 
 static void bts_trace_init_cpu(int cpu)
 {
-        per_cpu(tracer, cpu) =
-            ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-                               NULL, (size_t)-1, BTS_KERNEL);
+        per_cpu(hwb_tracer, cpu) =
+                ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+                                   BTS_BUFFER_SIZE, NULL, (size_t)-1,
+                                   BTS_KERNEL);
 
-        if (IS_ERR(per_cpu(tracer, cpu)))
-                per_cpu(tracer, cpu) = NULL;
+        if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+                per_cpu(hwb_tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
         for_each_online_cpu(cpu) {
                 bts_trace_init_cpu(cpu);
 
-                if (likely(per_cpu(tracer, cpu)))
+                if (likely(per_cpu(hwb_tracer, cpu)))
                         trace_hw_branches_enabled = 1;
         }
         trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 
         get_online_cpus();
         for_each_online_cpu(cpu) {
-                if (likely(per_cpu(tracer, cpu))) {
-                        ds_release_bts(per_cpu(tracer, cpu));
-                        per_cpu(tracer, cpu) = NULL;
+                if (likely(per_cpu(hwb_tracer, cpu))) {
+                        ds_release_bts(per_cpu(hwb_tracer, cpu));
+                        per_cpu(hwb_tracer, cpu) = NULL;
                 }
         }
         trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 
         get_online_cpus();
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_resume_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
         trace_hw_branches_suspended = 0;
         put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 
         get_online_cpus();
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_suspend_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
         trace_hw_branches_suspended = 1;
         put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                         bts_trace_init_cpu(cpu);
 
                         if (trace_hw_branches_suspended &&
-                            likely(per_cpu(tracer, cpu)))
-                                ds_suspend_bts(per_cpu(tracer, cpu));
+                            likely(per_cpu(hwb_tracer, cpu)))
+                                ds_suspend_bts(per_cpu(hwb_tracer, cpu));
                 }
                 break;
 
         case CPU_DOWN_PREPARE:
                 /* The notification is sent with interrupts enabled. */
-                if (likely(per_cpu(tracer, cpu))) {
-                        ds_release_bts(per_cpu(tracer, cpu));
-                        per_cpu(tracer, cpu) = NULL;
+                if (likely(per_cpu(hwb_tracer, cpu))) {
+                        ds_release_bts(per_cpu(hwb_tracer, cpu));
+                        per_cpu(hwb_tracer, cpu) = NULL;
                 }
         }
 
@@ -256,8 +257,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 
         get_online_cpus();
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_suspend_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
         /*
          * We need to collect the trace on the respective cpu since ftrace
          * implicitly adds the record for the current cpu.
@@ -266,8 +267,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
         on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_resume_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
         put_online_cpus();
 }

