Commit 29112ba
---
r: 211936
b: refs/heads/master
c: 8e5fc1a
h: refs/heads/master
v: v3
---
Stephane Eranian authored and Ingo Molnar committed Oct 18, 2010
1 parent 138f41d commit 29112ba
Showing 2 changed files with 64 additions and 14 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ba0cef3d149ce4db293c572bf36ed352b11ce7b9
+refs/heads/master: 8e5fc1a7320baf6076391607515dceb61319b36a
76 changes: 63 additions & 13 deletions trunk/kernel/perf_event.c
@@ -412,8 +412,8 @@ event_filter_match(struct perf_event *event)
         return event->cpu == -1 || event->cpu == smp_processor_id();
 }
 
-static void
-event_sched_out(struct perf_event *event,
+static int
+__event_sched_out(struct perf_event *event,
                   struct perf_cpu_context *cpuctx,
                   struct perf_event_context *ctx)
 {
@@ -432,14 +432,13 @@ event_sched_out(struct perf_event *event,
         }
 
         if (event->state != PERF_EVENT_STATE_ACTIVE)
-                return;
+                return 0;
 
         event->state = PERF_EVENT_STATE_INACTIVE;
         if (event->pending_disable) {
                 event->pending_disable = 0;
                 event->state = PERF_EVENT_STATE_OFF;
         }
-        event->tstamp_stopped = ctx->time;
         event->pmu->del(event, 0);
         event->oncpu = -1;
 
@@ -448,6 +447,19 @@ event_sched_out(struct perf_event *event,
         ctx->nr_active--;
         if (event->attr.exclusive || !cpuctx->active_oncpu)
                 cpuctx->exclusive = 0;
+        return 1;
+}
+
+static void
+event_sched_out(struct perf_event *event,
+                  struct perf_cpu_context *cpuctx,
+                  struct perf_event_context *ctx)
+{
+        int ret;
+
+        ret = __event_sched_out(event, cpuctx, ctx);
+        if (ret)
+                event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -647,7 +659,7 @@ void perf_event_disable(struct perf_event *event)
 }
 
 static int
-event_sched_in(struct perf_event *event,
+__event_sched_in(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
@@ -667,8 +679,6 @@ event_sched_in(struct perf_event *event,
                 return -EAGAIN;
         }
 
-        event->tstamp_running += ctx->time - event->tstamp_stopped;
-
         if (!is_software_event(event))
                 cpuctx->active_oncpu++;
         ctx->nr_active++;
@@ -679,6 +689,35 @@ event_sched_in(struct perf_event *event,
         return 0;
 }
 
+static inline int
+event_sched_in(struct perf_event *event,
+                 struct perf_cpu_context *cpuctx,
+                 struct perf_event_context *ctx)
+{
+        int ret = __event_sched_in(event, cpuctx, ctx);
+        if (ret)
+                return ret;
+        event->tstamp_running += ctx->time - event->tstamp_stopped;
+        return 0;
+}
+
+static void
+group_commit_event_sched_in(struct perf_event *group_event,
+               struct perf_cpu_context *cpuctx,
+               struct perf_event_context *ctx)
+{
+        struct perf_event *event;
+        u64 now = ctx->time;
+
+        group_event->tstamp_running += now - group_event->tstamp_stopped;
+        /*
+         * Schedule in siblings as one group (if any):
+         */
+        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+                event->tstamp_running += now - event->tstamp_stopped;
+        }
+}
+
 static int
 group_sched_in(struct perf_event *group_event,
                struct perf_cpu_context *cpuctx,
@@ -692,7 +731,13 @@ group_sched_in(struct perf_event *group_event,
 
         pmu->start_txn(pmu);
 
-        if (event_sched_in(group_event, cpuctx, ctx)) {
+        /*
+         * use __event_sched_in() to delay updating tstamp_running
+         * until the transaction is committed. In case of failure
+         * we will keep an unmodified tstamp_running which is a
+         * requirement to get correct timing information
+         */
+        if (__event_sched_in(group_event, cpuctx, ctx)) {
                 pmu->cancel_txn(pmu);
                 return -EAGAIN;
         }
@@ -701,26 +746,31 @@ group_sched_in(struct perf_event *group_event,
          * Schedule in siblings as one group (if any):
          */
         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-                if (event_sched_in(event, cpuctx, ctx)) {
+                if (__event_sched_in(event, cpuctx, ctx)) {
                         partial_group = event;
                         goto group_error;
                 }
         }
 
-        if (!pmu->commit_txn(pmu))
+        if (!pmu->commit_txn(pmu)) {
+                /* commit tstamp_running */
+                group_commit_event_sched_in(group_event, cpuctx, ctx);
                 return 0;
-
+        }
 group_error:
         /*
          * Groups can be scheduled in as one unit only, so undo any
          * partial group before returning:
+         *
+         * use __event_sched_out() to avoid updating tstamp_stopped
+         * because the event never actually ran
          */
         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
                 if (event == partial_group)
                         break;
-                event_sched_out(event, cpuctx, ctx);
+                __event_sched_out(event, cpuctx, ctx);
         }
-        event_sched_out(group_event, cpuctx, ctx);
+        __event_sched_out(group_event, cpuctx, ctx);
 
         pmu->cancel_txn(pmu);
 
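The pattern this patch applies can be hard to see through the diff alone: the scheduling helpers are split so that timestamp updates are deferred until the PMU transaction commits, which leaves tstamp_running and tstamp_stopped untouched whenever group_sched_in() fails. The stand-alone C sketch below illustrates only that pattern; struct fake_event, sched_in_no_timestamp(), commit_timestamps() and group_sched_in_sketch() are invented, simplified stand-ins for illustration, not the kernel's actual data structures or API.

/*
 * Illustrative sketch only: simplified stand-ins for the perf_event
 * bookkeeping touched by this patch, not the real kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_event {
        unsigned long long tstamp_running;
        unsigned long long tstamp_stopped;
        bool active;
};

/* Analogue of __event_sched_in(): activate the event, but do NOT
 * touch tstamp_running yet. */
static int sched_in_no_timestamp(struct fake_event *e)
{
        if (e->active)
                return -1;      /* pretend the PMU refused the event */
        e->active = true;
        return 0;
}

/* Analogue of group_commit_event_sched_in(): timestamps advance only
 * once the whole group transaction has committed. */
static void commit_timestamps(struct fake_event **group, int n,
                              unsigned long long now)
{
        int i;

        for (i = 0; i < n; i++)
                group[i]->tstamp_running += now - group[i]->tstamp_stopped;
}

/* Analogue of group_sched_in(): all-or-nothing group scheduling. */
static int group_sched_in_sketch(struct fake_event **group, int n,
                                 unsigned long long now)
{
        int i;

        for (i = 0; i < n; i++) {
                if (sched_in_no_timestamp(group[i]))
                        goto undo;
        }
        commit_timestamps(group, n, now);       /* transaction committed */
        return 0;

undo:
        /* Roll back without touching tstamp_stopped: the events in the
         * partial group never really ran. */
        while (--i >= 0)
                group[i]->active = false;
        return -1;
}

int main(void)
{
        struct fake_event a = { 0, 0, false };
        struct fake_event b = { 0, 0, true };   /* already active: will refuse */
        struct fake_event *group[2] = { &a, &b };

        if (group_sched_in_sketch(group, 2, 100))
                printf("group failed; a.tstamp_running = %llu (unchanged)\n",
                       a.tstamp_running);
        return 0;
}

The point of the split is the same as in the patch: nothing observable (here, the timestamps) changes until every member of the group has been accepted.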
