Commit 6e2d977

---
r: 158274
b: refs/heads/master
c: fa289be
h: refs/heads/master
v: v3

Paul Mackerras authored and Ingo Molnar committed Aug 25, 2009
1 parent e023376 commit 6e2d977
Showing 2 changed files with 31 additions and 14 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 96d6e48bc6b38342a59ccd23e25907d12caaeaf8
+refs/heads/master: fa289beca9de9119c7760bd984f3640da21bc94c
43 changes: 30 additions & 13 deletions trunk/kernel/perf_counter.c
@@ -469,7 +469,8 @@ static void update_counter_times(struct perf_counter *counter)
 	struct perf_counter_context *ctx = counter->ctx;
 	u64 run_end;
 
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
+	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
+	    counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
 		return;
 
 	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
@@ -518,7 +519,7 @@ static void __perf_counter_disable(void *info)
 	 */
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
 		update_context_time(ctx);
-		update_counter_times(counter);
+		update_group_times(counter);
 		if (counter == counter->group_leader)
 			group_sched_out(counter, cpuctx, ctx);
 		else
@@ -573,7 +574,7 @@ static void perf_counter_disable(struct perf_counter *counter)
 	 * in, so we can change the state safely.
 	 */
 	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
-		update_counter_times(counter);
+		update_group_times(counter);
 		counter->state = PERF_COUNTER_STATE_OFF;
 	}
 
@@ -850,6 +851,27 @@ perf_install_in_context(struct perf_counter_context *ctx,
 	spin_unlock_irq(&ctx->lock);
 }
 
+/*
+ * Put a counter into inactive state and update time fields.
+ * Enabling the leader of a group effectively enables all
+ * the group members that aren't explicitly disabled, so we
+ * have to update their ->tstamp_enabled also.
+ * Note: this works for group members as well as group leaders
+ * since the non-leader members' sibling_lists will be empty.
+ */
+static void __perf_counter_mark_enabled(struct perf_counter *counter,
+					struct perf_counter_context *ctx)
+{
+	struct perf_counter *sub;
+
+	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+	list_for_each_entry(sub, &counter->sibling_list, list_entry)
+		if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
+			sub->tstamp_enabled =
+				ctx->time - sub->total_time_enabled;
+}
+
 /*
  * Cross CPU call to enable a performance counter
  */
@@ -877,8 +899,7 @@ static void __perf_counter_enable(void *info)
 
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 		goto unlock;
-	counter->state = PERF_COUNTER_STATE_INACTIVE;
-	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+	__perf_counter_mark_enabled(counter, ctx);
 
 	/*
 	 * If the counter is in a group and isn't the group leader,
@@ -971,11 +992,9 @@ static void perf_counter_enable(struct perf_counter *counter)
 	 * Since we have the lock this context can't be scheduled
 	 * in, so we can change the state safely.
 	 */
-	if (counter->state == PERF_COUNTER_STATE_OFF) {
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
-	}
+	if (counter->state == PERF_COUNTER_STATE_OFF)
+		__perf_counter_mark_enabled(counter, ctx);
+
 out:
 	spin_unlock_irq(&ctx->lock);
 }
@@ -1479,9 +1498,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 		counter->attr.enable_on_exec = 0;
 		if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 			continue;
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
+		__perf_counter_mark_enabled(counter, ctx);
 		enabled = 1;
 	}
 
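The change above leans on one invariant: while a counter is enabled, total_time_enabled == ctx->time - tstamp_enabled. Marking a counter enabled at time t therefore sets tstamp_enabled = t - total_time_enabled, which resumes the accumulation without crediting the interval the counter spent OFF; __perf_counter_mark_enabled applies the same adjustment to every sibling in the group. Below is a minimal user-space sketch of that accounting for a single counter; the toy_* names are hypothetical stand-ins for illustration, not the kernel API.

#include <stdio.h>

typedef unsigned long long u64;

/* Toy model of one counter's enabled-time bookkeeping (not kernel code). */
struct toy_counter {
	int enabled;            /* 1 ~ INACTIVE/ACTIVE, 0 ~ OFF */
	u64 tstamp_enabled;     /* reference point for the invariant */
	u64 total_time_enabled; /* frozen while the counter is OFF */
};

/* Refresh the running total, as update_counter_times() does. */
static void toy_update_times(struct toy_counter *c, u64 ctx_time)
{
	if (c->enabled)
		c->total_time_enabled = ctx_time - c->tstamp_enabled;
}

/* Single-counter analogue of __perf_counter_mark_enabled(). */
static void toy_mark_enabled(struct toy_counter *c, u64 ctx_time)
{
	c->enabled = 1;
	c->tstamp_enabled = ctx_time - c->total_time_enabled;
}

int main(void)
{
	struct toy_counter c = { 0, 0, 0 };

	toy_mark_enabled(&c, 100);	/* enabled at t=100 */
	toy_update_times(&c, 150);	/* accumulated 50 units */
	c.enabled = 0;			/* OFF at t=150, total frozen at 50 */
	toy_mark_enabled(&c, 300);	/* re-enabled: tstamp = 300 - 50 = 250 */
	toy_update_times(&c, 360);	/* 360 - 250 = 110; the OFF gap is excluded */

	printf("total_time_enabled = %llu\n", c.total_time_enabled);
	return 0;
}

This prints total_time_enabled = 110: the 150 units spent OFF never enter the total. The kernel helper does the same arithmetic and then walks sibling_list, so group members implicitly enabled by their leader receive the identical adjustment.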
