perf: Default PMU ops
Provide default implementations for the pmu txn methods; this
allows us to remove some conditional code.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Sep 9, 2010
1 parent 33696fc commit ad5133b
Showing 2 changed files with 57 additions and 17 deletions.
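
Before the diff, a hedged sketch (not part of the commit) of what the change buys a driver: a hypothetical PMU with nothing special to do for transactions can leave pmu_enable/pmu_disable and the txn callbacks NULL and let perf_pmu_register() install the defaults added below. The dummy_* names are invented for illustration; event_init and perf_pmu_register() are the real interfaces at this point in the series, and a real driver would also supply the per-event enable/disable/read callbacks omitted here.

	#include <linux/perf_event.h>

	/* Placeholder ->event_init: claim no events so the core tries other PMUs. */
	static int dummy_event_init(struct perf_event *event)
	{
		return -ENOENT;
	}

	static struct pmu dummy_pmu = {
		.event_init	= dummy_event_init,
		/*
		 * No pmu_enable/pmu_disable and no start_txn/commit_txn/cancel_txn:
		 * after this commit perf_pmu_register() points them at nop defaults.
		 */
	};

	/* Somewhere in driver init:  perf_pmu_register(&dummy_pmu);  */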
10 changes: 5 additions & 5 deletions include/linux/perf_event.h
@@ -565,8 +565,8 @@ struct pmu {
 
 	int *pmu_disable_count;
 
-	void (*pmu_enable) (struct pmu *pmu);
-	void (*pmu_disable) (struct pmu *pmu);
+	void (*pmu_enable) (struct pmu *pmu); /* optional */
+	void (*pmu_disable) (struct pmu *pmu); /* optional */
 
 	/*
 	 * Should return -ENOENT when the @event doesn't match this PMU.
@@ -590,19 +590,19 @@ struct pmu {
 	 * Start the transaction, after this ->enable() doesn't need to
 	 * do schedulability tests.
 	 */
-	void (*start_txn) (struct pmu *pmu);
+	void (*start_txn) (struct pmu *pmu); /* optional */
 	/*
 	 * If ->start_txn() disabled the ->enable() schedulability test
 	 * then ->commit_txn() is required to perform one. On success
 	 * the transaction is closed. On error the transaction is kept
 	 * open until ->cancel_txn() is called.
 	 */
-	int (*commit_txn) (struct pmu *pmu);
+	int (*commit_txn) (struct pmu *pmu); /* optional */
 	/*
 	 * Will cancel the transaction, assumes ->disable() is called
 	 * for each successfull ->enable() during the transaction.
 	 */
-	void (*cancel_txn) (struct pmu *pmu);
+	void (*cancel_txn) (struct pmu *pmu); /* optional */
 };
 
 /**
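
The comments above spell out the transaction contract from the driver's side. As a hedged illustration (a hypothetical driver loosely modeled on how the hardware PMU drivers use this interface, not code from this commit): ->start_txn() marks the group so ->enable() can skip its per-event schedulability test, ->commit_txn() runs the deferred test and closes the transaction on success, and ->cancel_txn() unwinds. The mypmu_* names, the MYPMU_TXN flag, and the mypmu_schedule_events() helper are invented; perf_pmu_disable()/perf_pmu_enable() are the real helpers from the parent commit.

	#include <linux/percpu.h>
	#include <linux/perf_event.h>

	#define MYPMU_TXN	0x1

	struct mypmu_cpu {
		unsigned int group_flag;
	};

	static DEFINE_PER_CPU(struct mypmu_cpu, mypmu_cpu_state);

	/* Hypothetical helper: the deferred schedulability test for the whole group. */
	static int mypmu_schedule_events(struct mypmu_cpu *cpuc)
	{
		return 0;
	}

	static void mypmu_start_txn(struct pmu *pmu)
	{
		struct mypmu_cpu *cpuc = this_cpu_ptr(&mypmu_cpu_state);

		cpuc->group_flag |= MYPMU_TXN;	/* ->enable() may skip its schedulability test */
		perf_pmu_disable(pmu);		/* batch the hardware accesses */
	}

	static int mypmu_commit_txn(struct pmu *pmu)
	{
		struct mypmu_cpu *cpuc = this_cpu_ptr(&mypmu_cpu_state);
		int ret;

		ret = mypmu_schedule_events(cpuc);	/* the test ->enable() skipped */
		if (ret)
			return ret;	/* error: transaction stays open until ->cancel_txn() */

		cpuc->group_flag &= ~MYPMU_TXN;
		perf_pmu_enable(pmu);
		return 0;		/* success closes the transaction */
	}

	static void mypmu_cancel_txn(struct pmu *pmu)
	{
		struct mypmu_cpu *cpuc = this_cpu_ptr(&mypmu_cpu_state);

		cpuc->group_flag &= ~MYPMU_TXN;
		perf_pmu_enable(pmu);	/* core already ->disable()d each successful ->enable() */
	}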
64 changes: 52 additions & 12 deletions kernel/perf_event.c
@@ -674,21 +674,14 @@ group_sched_in(struct perf_event *group_event,
 {
 	struct perf_event *event, *partial_group = NULL;
 	struct pmu *pmu = group_event->pmu;
-	bool txn = false;
 
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
 
-	/* Check if group transaction availabe */
-	if (pmu->start_txn)
-		txn = true;
-
-	if (txn)
-		pmu->start_txn(pmu);
+	pmu->start_txn(pmu);
 
 	if (event_sched_in(group_event, cpuctx, ctx)) {
-		if (txn)
-			pmu->cancel_txn(pmu);
+		pmu->cancel_txn(pmu);
 		return -EAGAIN;
 	}
 
@@ -702,7 +695,7 @@ group_sched_in(struct perf_event *group_event,
 		}
 	}
 
-	if (!txn || !pmu->commit_txn(pmu))
+	if (!pmu->commit_txn(pmu))
 		return 0;
 
 group_error:
@@ -717,8 +710,7 @@ group_sched_in(struct perf_event *group_event,
 	}
 	event_sched_out(group_event, cpuctx, ctx);
 
-	if (txn)
-		pmu->cancel_txn(pmu);
+	pmu->cancel_txn(pmu);
 
 	return -EAGAIN;
 }
@@ -4965,6 +4957,31 @@ static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
 
+static void perf_pmu_nop_void(struct pmu *pmu)
+{
+}
+
+static int perf_pmu_nop_int(struct pmu *pmu)
+{
+	return 0;
+}
+
+static void perf_pmu_start_txn(struct pmu *pmu)
+{
+	perf_pmu_disable(pmu);
+}
+
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+	perf_pmu_enable(pmu);
+	return 0;
+}
+
+static void perf_pmu_cancel_txn(struct pmu *pmu)
+{
+	perf_pmu_enable(pmu);
+}
+
 int perf_pmu_register(struct pmu *pmu)
 {
 	int ret;
@@ -4974,6 +4991,29 @@ int perf_pmu_register(struct pmu *pmu)
 	pmu->pmu_disable_count = alloc_percpu(int);
 	if (!pmu->pmu_disable_count)
 		goto unlock;
+
+	if (!pmu->start_txn) {
+		if (pmu->pmu_enable) {
+			/*
+			 * If we have pmu_enable/pmu_disable calls, install
+			 * transaction stubs that use that to try and batch
+			 * hardware accesses.
+			 */
+			pmu->start_txn = perf_pmu_start_txn;
+			pmu->commit_txn = perf_pmu_commit_txn;
+			pmu->cancel_txn = perf_pmu_cancel_txn;
+		} else {
+			pmu->start_txn = perf_pmu_nop_void;
+			pmu->commit_txn = perf_pmu_nop_int;
+			pmu->cancel_txn = perf_pmu_nop_void;
+		}
+	}
+
+	if (!pmu->pmu_enable) {
+		pmu->pmu_enable = perf_pmu_nop_void;
+		pmu->pmu_disable = perf_pmu_nop_void;
+	}
+
 	list_add_rcu(&pmu->entry, &pmus);
 	ret = 0;
 unlock:
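
One consequence of the defaults installed above: a driver that provides only pmu_enable/pmu_disable still gets transaction batching, because perf_pmu_start_txn() disables the PMU once for the whole group and perf_pmu_commit_txn()/perf_pmu_cancel_txn() re-enable it afterwards. That relies on perf_pmu_disable()/perf_pmu_enable() being per-cpu refcounted wrappers introduced by the parent commit 33696fc; roughly as sketched below (a paraphrase from that commit, not part of this diff):

	void perf_pmu_disable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!(*count)++)
			pmu->pmu_disable(pmu);	/* only the first disable touches the hardware */
	}

	void perf_pmu_enable(struct pmu *pmu)
	{
		int *count = this_cpu_ptr(pmu->pmu_disable_count);

		if (!--(*count))
			pmu->pmu_enable(pmu);	/* only the last enable touches the hardware */
	}

With the nop defaults in place, these calls are safe for every registered PMU, mirroring how the group_sched_in() hunks above drop their per-call-site checks.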
