
Commit d4b7b7a

---
r: 205245
b: refs/heads/master
c: 8d2cacb
h: refs/heads/master
i:
  205243: c254545
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jun 9, 2010
1 parent 3ec71d6 commit d4b7b7a
Showing 6 changed files with 37 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3af9e859281bda7eb7c20b51879cf43aa788ac2e
+refs/heads/master: 8d2cacbbb8deadfae78aa16e4e1ee619bdd7019e
7 changes: 4 additions & 3 deletions trunk/arch/powerpc/kernel/perf_event.c
@@ -754,7 +754,7 @@ static int power_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be performed
 	 * at commit time (->commit_txn) as a whole
 	 */
-	if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuhw->group_flag & PERF_EVENT_TXN)
 		goto nocheck;

 	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -858,7 +858,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 	cpuhw->n_txn_start = cpuhw->n_events;
 }

@@ -871,7 +871,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }

 /*
@@ -897,6 +897,7 @@ int power_pmu_commit_txn(const struct pmu *pmu)
 	for (i = cpuhw->n_txn_start; i < n; ++i)
 		cpuhw->event[i]->hw.config = cpuhw->events[i];

+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }

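The powerpc change above (and the sparc and x86 changes below) all follow one pattern: ->start_txn() sets the renamed PERF_EVENT_TXN bit so ->enable() can skip its per-event schedulability test, and a successful ->commit_txn() now clears the bit itself. A minimal user-space sketch of that flag lifecycle; struct hw_state, n_added and test_schedulability are invented stand-ins for the kernel's cpu_hw_events machinery, not kernel code:

#include <stdio.h>

#define PERF_EVENT_TXN 0x1	/* same bit as the new define */

/* Invented stand-in for the per-CPU cpu_hw_events state. */
struct hw_state {
	unsigned int group_flag;
	int n_added;
};

/* Stub: pretend any group fits on the PMU. */
static int test_schedulability(struct hw_state *hw) { (void)hw; return 0; }

static void start_txn(struct hw_state *hw)
{
	hw->group_flag |= PERF_EVENT_TXN;
}

static int event_enable(struct hw_state *hw)
{
	hw->n_added++;

	/* Inside a transaction, defer the test to ->commit_txn(). */
	if (hw->group_flag & PERF_EVENT_TXN)
		return 0;

	return test_schedulability(hw);	/* single-event path */
}

static int commit_txn(struct hw_state *hw)
{
	int ret = test_schedulability(hw);	/* one test for the group */

	if (ret)
		return ret;	/* txn stays open; caller cancels */

	hw->group_flag &= ~PERF_EVENT_TXN;	/* success closes the txn */
	return 0;
}

int main(void)
{
	struct hw_state hw = { 0, 0 };

	start_txn(&hw);
	event_enable(&hw);
	event_enable(&hw);
	if (commit_txn(&hw) == 0)
		printf("scheduled %d events as one group\n", hw.n_added);
	return 0;
}
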
7 changes: 4 additions & 3 deletions trunk/arch/sparc/kernel/perf_event.c
@@ -1005,7 +1005,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be performed
 	 * at commit time (->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto nocheck;

 	if (check_excludes(cpuc->event, n0, 1))
@@ -1102,7 +1102,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 }

 /*
@@ -1114,7 +1114,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }

 /*
@@ -1137,6 +1137,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
 		return -EAGAIN;

+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }

14 changes: 5 additions & 9 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be performed
 	 * at commit time (->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto out;

 	ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1096,7 +1096,7 @@ static void x86_pmu_disable(struct perf_event *event)
 	 * The events never got scheduled and ->cancel_txn will truncate
 	 * the event_list.
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		return;

 	x86_pmu_stop(event);
@@ -1388,7 +1388,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

-	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag |= PERF_EVENT_TXN;
 	cpuc->n_txn = 0;
 }

@@ -1401,7 +1401,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

-	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	/*
 	 * Truncate the collected events.
 	 */
@@ -1435,11 +1435,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	 */
 	memcpy(cpuc->assign, assign, n*sizeof(int));

-	/*
-	 * Clear out the txn count so that ->cancel_txn() which gets
-	 * run after ->commit_txn() doesn't undo things.
-	 */
-	cpuc->n_txn = 0;
+	cpuc->group_flag &= ~PERF_EVENT_TXN;

 	return 0;
 }
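Beyond the rename, the x86 hunks encode two rules: while a transaction is open, ->disable() returns early because the events were never scheduled and ->cancel_txn() will truncate the collected list; and ->commit_txn() now closes the transaction by clearing the flag rather than zeroing n_txn. A speculative sketch of that interplay; struct cpu_state and stop_event are invented names, not the driver's:

#include <stdio.h>

#define PERF_EVENT_TXN 0x1

/* Invented stand-in for cpuc: the txn flag plus a collected count. */
struct cpu_state {
	unsigned int group_flag;
	int n_txn;
};

static void stop_event(void) { printf("stopping a scheduled event\n"); }

static void event_disable(struct cpu_state *c)
{
	/* Open txn: nothing was scheduled; ->cancel_txn() cleans up. */
	if (c->group_flag & PERF_EVENT_TXN)
		return;

	stop_event();
}

static int commit_txn(struct cpu_state *c)
{
	/* the group-wide schedulability test would run here */
	c->group_flag &= ~PERF_EVENT_TXN;	/* success: txn closed */
	return 0;
}

static void cancel_txn(struct cpu_state *c)
{
	c->group_flag &= ~PERF_EVENT_TXN;
	c->n_txn = 0;	/* truncate the events collected since start_txn */
}

int main(void)
{
	struct cpu_state c = { PERF_EVENT_TXN, 2 };

	event_disable(&c);		/* no-op: txn still open */
	if (commit_txn(&c))
		cancel_txn(&c);		/* failure path: roll back */
	else
		event_disable(&c);	/* txn closed: really stops now */
	return 0;
}
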
27 changes: 22 additions & 5 deletions trunk/include/linux/perf_event.h
@@ -549,7 +549,10 @@ struct hw_perf_event {

 struct perf_event;

-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1

 /**
  * struct pmu - generic performance monitoring unit
@@ -563,14 +566,28 @@ struct pmu {
 	void (*unthrottle)	(struct perf_event *event);

 	/*
-	 * group events scheduling is treated as a transaction,
-	 * add group events as a whole and perform one schedulability test.
-	 * If test fails, roll back the whole group
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group
+	 */
+
+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
 	 */
 	void (*start_txn)	(const struct pmu *pmu);
-	void (*cancel_txn)	(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
 	int (*commit_txn)	(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called for
+	 * each successful ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };

 /**
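Taken together, the new kerneldoc defines the contract a driver must honor: ->start_txn() suppresses the ->enable() test, ->commit_txn() runs one group-wide test and closes the transaction only on success, and ->cancel_txn() rolls back an open transaction. A hedged sketch of a driver wiring up the three ops under that contract; everything except the op names and PERF_EVENT_TXN is invented for illustration:

#include <errno.h>

#define PERF_EVENT_TXN 0x1

struct pmu;	/* opaque here; the kernel's struct carries these ops */

static unsigned int group_flag;		/* invented per-CPU state */

static int group_fits_on_pmu(void) { return 1; /* stub test */ }

static void demo_start_txn(const struct pmu *pmu)
{
	(void)pmu;
	group_flag |= PERF_EVENT_TXN;	/* ->enable() now skips its test */
}

static int demo_commit_txn(const struct pmu *pmu)
{
	(void)pmu;
	if (!group_fits_on_pmu())
		return -EAGAIN;	/* txn stays open until ->cancel_txn() */

	group_flag &= ~PERF_EVENT_TXN;	/* success closes the transaction */
	return 0;
}

static void demo_cancel_txn(const struct pmu *pmu)
{
	(void)pmu;
	group_flag &= ~PERF_EVENT_TXN;	/* roll back an open transaction */
}

int main(void)
{
	demo_start_txn(0);
	if (demo_commit_txn(0))
		demo_cancel_txn(0);
	return 0;
}
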
9 changes: 1 addition & 8 deletions trunk/kernel/perf_event.c
@@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event,
 	struct perf_event *event, *partial_group = NULL;
 	const struct pmu *pmu = group_event->pmu;
 	bool txn = false;
-	int ret;

 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
@@ -703,15 +702,9 @@ group_sched_in(struct perf_event *group_event,
 		}
 	}

-	if (!txn)
+	if (!txn || !pmu->commit_txn(pmu))
 		return 0;

-	ret = pmu->commit_txn(pmu);
-	if (!ret) {
-		pmu->cancel_txn(pmu);
-		return 0;
-	}
-
 group_error:
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
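The core change above collapses the old ret/cancel dance into a single condition: ->commit_txn() returns zero on success, so !pmu->commit_txn(pmu) is true exactly when the group test passed, and ->cancel_txn() is no longer called after a successful commit, which is why the drivers now clear PERF_EVENT_TXN in ->commit_txn() themselves. A condensed sketch of the resulting control flow; sched_in_group and undo_partial_group are invented stand-ins for the kernel's code:

#include <errno.h>
#include <stdbool.h>

/* Minimal pmu shape for the sketch; the kernel's has many more ops. */
struct pmu {
	int  (*commit_txn)(const struct pmu *pmu);
	void (*cancel_txn)(const struct pmu *pmu);
};

static void undo_partial_group(const struct pmu *pmu, bool txn)
{
	/* ->disable() each event that was successfully enabled ... */
	if (txn)
		pmu->cancel_txn(pmu);	/* transaction is still open */
}

static int sched_in_group(const struct pmu *pmu, bool txn)
{
	/* ... the group's events were ->enable()d above ... */

	if (!txn || !pmu->commit_txn(pmu))
		return 0;	/* no txn needed, or group test passed */

	/* commit failed and left the transaction open */
	undo_partial_group(pmu, txn);
	return -EAGAIN;
}

static int ok_commit(const struct pmu *pmu) { (void)pmu; return 0; }
static void noop_cancel(const struct pmu *pmu) { (void)pmu; }

int main(void)
{
	struct pmu pmu = { ok_commit, noop_cancel };
	return sched_in_group(&pmu, true);
}
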
