Commit 07f8024

---
r: 199729
b: refs/heads/master
c: 90151c3
h: refs/heads/master
i:
  199727: 9155476
v: v3
Stephane Eranian authored and Ingo Molnar committed May 31, 2010
1 parent 8480bdf commit 07f8024
Showing 3 changed files with 30 additions and 5 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2e97942fe57864588774f173cf4cd7bb68968b76
+refs/heads/master: 90151c35b19633e0cab5a6c80f1ba4a51e7c913b
trunk/arch/x86/kernel/cpu/perf_event.c (22 changes: 22 additions & 0 deletions)
@@ -106,6 +106,7 @@ struct cpu_hw_events {
 
 	int			n_events;
 	int			n_added;
+	int			n_txn;
 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
 	u64			tags[X86_PMC_IDX_MAX];
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
@@ -983,6 +984,7 @@ static int x86_pmu_enable(struct perf_event *event)
 out:
 	cpuc->n_events = n;
 	cpuc->n_added += n - n0;
+	cpuc->n_txn += n - n0;
 
 	return 0;
 }
@@ -1089,6 +1091,14 @@ static void x86_pmu_disable(struct perf_event *event)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int i;
 
+	/*
+	 * If we're called during a txn, we don't need to do anything.
+	 * The events never got scheduled and ->cancel_txn will truncate
+	 * the event_list.
+	 */
+	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+		return;
+
 	x86_pmu_stop(event);
 
 	for (i = 0; i < cpuc->n_events; i++) {
@@ -1379,6 +1389,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuc->n_txn = 0;
 }
 
 /*
@@ -1391,6 +1402,11 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	/*
+	 * Truncate the collected events.
+	 */
+	cpuc->n_added -= cpuc->n_txn;
+	cpuc->n_events -= cpuc->n_txn;
 }
 
 /*
@@ -1419,6 +1435,12 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	 */
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
+	/*
+	 * Clear out the txn count so that ->cancel_txn() which gets
+	 * run after ->commit_txn() doesn't undo things.
+	 */
+	cpuc->n_txn = 0;
+
 	return 0;
 }
 
trunk/kernel/perf_event.c (11 changes: 7 additions & 4 deletions)
@@ -687,8 +687,11 @@ group_sched_in(struct perf_event *group_event,
 	if (txn)
 		pmu->start_txn(pmu);
 
-	if (event_sched_in(group_event, cpuctx, ctx))
+	if (event_sched_in(group_event, cpuctx, ctx)) {
+		if (txn)
+			pmu->cancel_txn(pmu);
 		return -EAGAIN;
+	}
 
 	/*
 	 * Schedule in siblings as one group (if any):
@@ -710,9 +713,6 @@ group_sched_in(struct perf_event *group_event,
 	}
 
 group_error:
-	if (txn)
-		pmu->cancel_txn(pmu);
-
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
@@ -724,6 +724,9 @@ group_sched_in(struct perf_event *group_event,
 	}
 	event_sched_out(group_event, cpuctx, ctx);
 
+	if (txn)
+		pmu->cancel_txn(pmu);
+
 	return -EAGAIN;
 }
