Commit eccd9e4
---
r: 191299
b: refs/heads/master
c: 6bde9b6
h: refs/heads/master
i:
  191297: e75bfca
  191295: 0d5e83c
v: v3
Lin Ming authored and Ingo Molnar committed May 7, 2010
1 parent ac436a1 commit eccd9e4
Showing 3 changed files with 33 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: ab608344bcbde4f55ec4cd911b686b0ce3eae076
refs/heads/master: 6bde9b6ce0127e2a56228a2071536d422be31336
15 changes: 12 additions & 3 deletions trunk/include/linux/perf_event.h
@@ -547,6 +547,8 @@ struct hw_perf_event {

struct perf_event;

#define PERF_EVENT_TXN_STARTED 1

/**
* struct pmu - generic performance monitoring unit
*/
@@ -557,6 +559,16 @@ struct pmu {
void (*stop) (struct perf_event *event);
void (*read) (struct perf_event *event);
void (*unthrottle) (struct perf_event *event);

/*
 * Group event scheduling is treated as a transaction:
 * add the group's events as a whole and perform one
 * schedulability test. If the test fails, roll back
 * the whole group.
 */

void (*start_txn) (const struct pmu *pmu);
void (*cancel_txn) (const struct pmu *pmu);
int (*commit_txn) (const struct pmu *pmu);
};

/**
@@ -823,9 +835,6 @@ extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
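
The new callbacks give a PMU backend one place to run a group-wide schedulability test, instead of validating each event as it is added. Below is a minimal sketch, not part of this commit, of how a hypothetical backend might wire them up: my_cpu_events, my_schedule_events() and the my_pmu_* names are invented for the example, and only the transaction bookkeeping is shown.

struct my_cpu_events {
	int	n_events;	/* events collected so far */
	int	group_flag;	/* holds PERF_EVENT_TXN_STARTED during a transaction */
};

static DEFINE_PER_CPU(struct my_cpu_events, my_cpu_events);

/* Hypothetical constraint check over all collected events (body not shown). */
static int my_schedule_events(struct my_cpu_events *cpuc);

/* Open a transaction: collect group members without testing each one. */
static void my_pmu_start_txn(const struct pmu *pmu)
{
	struct my_cpu_events *cpuc = &__get_cpu_var(my_cpu_events);

	cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
}

/* Close a transaction: drop the flag; the core unwinds any partial group. */
static void my_pmu_cancel_txn(const struct pmu *pmu)
{
	struct my_cpu_events *cpuc = &__get_cpu_var(my_cpu_events);

	cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
}

/* Commit: run one schedulability test over everything collected. */
static int my_pmu_commit_txn(const struct pmu *pmu)
{
	struct my_cpu_events *cpuc = &__get_cpu_var(my_cpu_events);

	return my_schedule_events(cpuc);	/* 0 on success, -EAGAIN otherwise */
}

static const struct pmu my_pmu = {
	/* ...the usual enable/disable/read callbacks... */
	.start_txn	= my_pmu_start_txn,
	.cancel_txn	= my_pmu_cancel_txn,
	.commit_txn	= my_pmu_commit_txn,
};

Note that, in the new group_sched_in() below, cancel_txn() is also called on the success path right after commit_txn() returns 0, so it only needs to drop the transaction state rather than undo any scheduling.
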
33 changes: 20 additions & 13 deletions trunk/kernel/perf_event.c
@@ -83,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
void __weak hw_perf_disable(void) { barrier(); }
void __weak hw_perf_enable(void) { barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
return 0;
}

void __weak perf_event_print_debug(void) { }

static DEFINE_PER_CPU(int, perf_disable_count);
@@ -644,15 +636,20 @@ group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group;
struct perf_event *event, *partial_group = NULL;
const struct pmu *pmu = group_event->pmu;
bool txn = false;
int ret;

if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;

ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
if (ret)
return ret < 0 ? ret : 0;
/* Check if group transaction available */
if (pmu->start_txn)
txn = true;

if (txn)
pmu->start_txn(pmu);

if (event_sched_in(group_event, cpuctx, ctx))
return -EAGAIN;
@@ -667,9 +664,19 @@ group_sched_in(struct perf_event *group_event,
}
}

return 0;
if (txn) {
ret = pmu->commit_txn(pmu);
if (!ret) {
pmu->cancel_txn(pmu);

return 0;
}
}

group_error:
if (txn)
pmu->cancel_txn(pmu);

/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
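
With these hooks in place the core drives the transaction itself, which is why the __weak hw_perf_group_sched_in() stub and its extern declaration can be removed. A condensed paraphrase of the transactional path through the new group_sched_in() is sketched below; it is an illustration only, and the group_error unwinding of partially scheduled siblings is elided.

/* Illustration only: condensed paraphrase of the transactional path above. */
static int group_sched_in_txn_sketch(struct perf_event *group_event,
				     struct perf_cpu_context *cpuctx,
				     struct perf_event_context *ctx)
{
	const struct pmu *pmu = group_event->pmu;
	struct perf_event *event;

	pmu->start_txn(pmu);				/* open the transaction */

	if (event_sched_in(group_event, cpuctx, ctx))
		return -EAGAIN;				/* leader did not go in */

	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx))
			goto group_error;		/* partial group */
	}

	if (!pmu->commit_txn(pmu)) {			/* one schedulability test */
		pmu->cancel_txn(pmu);			/* success: drop txn state */
		return 0;
	}

group_error:
	pmu->cancel_txn(pmu);				/* failure: drop txn state... */
	/* ...then schedule the already-added events back out (elided here) */
	return -EAGAIN;
}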
