perf_counter: Fix inheritance cleanup code
Clean up __perf_counter_exit_task(), which open-coded the
list_{add,del}_counter() logic and had consequently diverged from it.
This divergence could lead to software counter crashes.

Also, fold the ctx->nr_counters increment/decrement into those
functions and clean up some of the related code.

[ Impact: fix potential sw counter crash, cleanup ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
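
The point of the cleanup: list membership and the ctx->nr_counters
bookkeeping now change only through list_add_counter()/list_del_counter(),
so call sites can no longer drift apart. Below is a minimal userspace C
sketch of that pattern; the types and helper names (struct item, struct
ctx, list_add_item, list_del_item) are illustrative, not the real
perf_counter code:

#include <assert.h>

struct item {
	struct item *next, *prev;
};

struct ctx {
	struct item head;	/* circular list; head.next == &head when empty */
	int nr_items;		/* bookkeeping that must track the list length */
};

static void ctx_init(struct ctx *ctx)
{
	ctx->head.next = ctx->head.prev = &ctx->head;
	ctx->nr_items = 0;
}

/* The only way in: linkage and nr_items change together. */
static void list_add_item(struct item *it, struct ctx *ctx)
{
	it->next = ctx->head.next;
	it->prev = &ctx->head;
	ctx->head.next->prev = it;
	ctx->head.next = it;
	ctx->nr_items++;
}

/* The only way out: mirrors list_add_item(), just as list_del_counter()
 * now mirrors list_add_counter(). */
static void list_del_item(struct item *it, struct ctx *ctx)
{
	ctx->nr_items--;
	it->prev->next = it->next;
	it->next->prev = it->prev;
	it->next = it->prev = it;	/* re-initialize, like list_del_init() */
}

int main(void)
{
	struct ctx ctx;
	struct item a, b;

	ctx_init(&ctx);
	list_add_item(&a, &ctx);
	list_add_item(&b, &ctx);
	list_del_item(&a, &ctx);
	assert(ctx.nr_items == 1);	/* cannot silently diverge from the list */
	return 0;
}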
Peter Zijlstra authored and Ingo Molnar committed May 17, 2009
1 parent 0bbd0d4 commit 8bc2095
Showing 1 changed file with 15 additions and 17 deletions.
kernel/perf_counter.c: 15 additions & 17 deletions
@@ -115,13 +115,16 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	}
 
 	list_add_rcu(&counter->event_entry, &ctx->event_list);
+	ctx->nr_counters++;
 }
 
 static void
 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
 	struct perf_counter *sibling, *tmp;
 
+	ctx->nr_counters--;
+
 	list_del_init(&counter->list_entry);
 	list_del_rcu(&counter->event_entry);
 
@@ -209,7 +212,6 @@ static void __perf_counter_remove_from_context(void *info)
 	counter_sched_out(counter, cpuctx, ctx);
 
 	counter->task = NULL;
-	ctx->nr_counters--;
 
 	/*
 	 * Protect the list operation against NMI by disabling the
@@ -276,7 +278,6 @@ static void perf_counter_remove_from_context(struct perf_counter *counter)
 	 * succeed.
 	 */
 	if (!list_empty(&counter->list_entry)) {
-		ctx->nr_counters--;
 		list_del_counter(counter, ctx);
 		counter->task = NULL;
 	}
@@ -544,7 +545,6 @@ static void add_counter_to_ctx(struct perf_counter *counter,
 			       struct perf_counter_context *ctx)
 {
 	list_add_counter(counter, ctx);
-	ctx->nr_counters++;
 	counter->prev_state = PERF_COUNTER_STATE_OFF;
 	counter->tstamp_enabled = ctx->time;
 	counter->tstamp_running = ctx->time;
@@ -3206,9 +3206,8 @@ static int inherit_group(struct perf_counter *parent_counter,
 static void sync_child_counter(struct perf_counter *child_counter,
 			       struct perf_counter *parent_counter)
 {
-	u64 parent_val, child_val;
+	u64 child_val;
 
-	parent_val = atomic64_read(&parent_counter->count);
 	child_val = atomic64_read(&child_counter->count);
 
 	/*
@@ -3240,7 +3239,6 @@ __perf_counter_exit_task(struct task_struct *child,
 			 struct perf_counter_context *child_ctx)
 {
 	struct perf_counter *parent_counter;
-	struct perf_counter *sub, *tmp;
 
 	/*
 	 * If we do not self-reap then we have to wait for the
@@ -3252,8 +3250,8 @@ __perf_counter_exit_task(struct task_struct *child,
 	 */
 	if (child != current) {
 		wait_task_inactive(child, 0);
-		list_del_init(&child_counter->list_entry);
 		update_counter_times(child_counter);
+		list_del_counter(child_counter, child_ctx);
 	} else {
 		struct perf_cpu_context *cpuctx;
 		unsigned long flags;
@@ -3272,9 +3270,7 @@ __perf_counter_exit_task(struct task_struct *child,
 		group_sched_out(child_counter, cpuctx, child_ctx);
 		update_counter_times(child_counter);
 
-		list_del_init(&child_counter->list_entry);
-
-		child_ctx->nr_counters--;
+		list_del_counter(child_counter, child_ctx);
 
 		perf_enable();
 		local_irq_restore(flags);
@@ -3288,13 +3284,6 @@ __perf_counter_exit_task(struct task_struct *child,
 	 */
 	if (parent_counter) {
 		sync_child_counter(child_counter, parent_counter);
-		list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
-					 list_entry) {
-			if (sub->parent) {
-				sync_child_counter(sub, sub->parent);
-				free_counter(sub);
-			}
-		}
 		free_counter(child_counter);
 	}
 }
@@ -3315,9 +3304,18 @@ void perf_counter_exit_task(struct task_struct *child)
 	if (likely(!child_ctx->nr_counters))
 		return;
 
+again:
 	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
 				 list_entry)
 		__perf_counter_exit_task(child, child_counter, child_ctx);
+
+	/*
+	 * If the last counter was a group counter, it will have appended all
+	 * its siblings to the list, but we obtained 'tmp' before that which
+	 * will still point to the list head terminating the iteration.
+	 */
+	if (!list_empty(&child_ctx->counter_list))
+		goto again;
 }
 
 /*
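
The comment added in the final hunk describes a list that can grow while
it is being walked: list_for_each_entry_safe() caches the next pointer
before the loop body runs, so entries appended during the walk can be
missed, and the loop must rescan until the list is empty. Below is a
standalone C sketch of that hazard and the rescan idiom, using
illustrative names (struct node, add_tail, handle) rather than the
kernel's list helpers:

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next, *prev;
	struct node *sibling;	/* moved onto the list when this node is handled */
	int done;
};

static struct node head = { &head, &head, NULL, 0 };

static void add_tail(struct node *n)
{
	n->next = &head;
	n->prev = head.prev;
	head.prev->next = n;
	head.prev = n;
}

static void del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void handle(struct node *n)
{
	del(n);
	n->done = 1;
	if (n->sibling)
		add_tail(n->sibling);	/* the list grows mid-iteration */
}

int main(void)
{
	struct node sib = { NULL, NULL, NULL, 0 };
	struct node leader = { NULL, NULL, &sib, 0 };
	struct node *pos, *tmp;

	add_tail(&leader);

again:
	/*
	 * "Safe" iteration caches tmp before handle() runs, like
	 * list_for_each_entry_safe(); when leader is handled, tmp is
	 * already &head, so the freshly appended sib is skipped.
	 */
	for (pos = head.next, tmp = pos->next; pos != &head;
	     pos = tmp, tmp = pos->next)
		handle(pos);

	if (head.next != &head)	/* entries appended behind tmp: rescan */
		goto again;

	assert(leader.done && sib.done);
	return 0;
}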
