Commit edd5e83
---
r: 191348
b: refs/heads/master
c: b09e019
h: refs/heads/master
v: v3
---

Arnaldo Carvalho de Melo committed May 11, 2010
1 parent 8ffcbc1 commit edd5e83
Showing 8 changed files with 167 additions and 183 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 8e6d5573af55435160d329f6ae3fe16a0abbdaec
refs/heads/master: b09e0190acf88c7fe3b05e3c331e1b2ef5310896
129 changes: 61 additions & 68 deletions trunk/arch/powerpc/kernel/perf_event.c
@@ -35,9 +35,6 @@ struct cpu_hw_events {
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

unsigned int group_flag;
int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

@@ -721,6 +718,66 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}

static void event_sched_in(struct perf_event *event)
{
event->state = PERF_EVENT_STATE_ACTIVE;
event->oncpu = smp_processor_id();
event->tstamp_running += event->ctx->time - event->tstamp_stopped;
if (is_software_event(event))
event->pmu->enable(event);
}

/*
* Called to enable a whole group of events.
* Returns 1 if the group was enabled, or -EAGAIN if it could not be.
* Assumes the caller has disabled interrupts and has
* frozen the PMU with hw_perf_save_disable.
*/
int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct cpu_hw_events *cpuhw;
long i, n, n0;
struct perf_event *sub;

if (!ppmu)
return 0;
cpuhw = &__get_cpu_var(cpu_hw_events);
n0 = cpuhw->n_events;
n = collect_events(group_leader, ppmu->n_counter - n0,
&cpuhw->event[n0], &cpuhw->events[n0],
&cpuhw->flags[n0]);
if (n < 0)
return -EAGAIN;
if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
if (i < 0)
return -EAGAIN;
cpuhw->n_events = n0 + n;
cpuhw->n_added += n;

/*
* OK, this group can go on; update event states etc.,
* and enable any software events
*/
for (i = n0; i < n0 + n; ++i)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuctx->active_oncpu += n;
n = 1;
event_sched_in(group_leader);
list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
if (sub->state != PERF_EVENT_STATE_OFF) {
event_sched_in(sub);
++n;
}
}
ctx->nr_active += n;

return 1;
}
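
For orientation, a minimal caller sketch, hypothetical and not part of this commit: it follows the convention the comment above describes, with perf_disable()/perf_enable() assumed as this era's PMU freeze/unfreeze calls standing in for the older hw_perf_save_disable naming.

/*
 * Hypothetical caller: interrupts off and the PMU frozen before the
 * group is scheduled in, as hw_perf_group_sched_in() assumes.
 */
static int try_group_sched_in(struct perf_event *leader,
			      struct perf_cpu_context *cpuctx,
			      struct perf_event_context *ctx)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	perf_disable();			/* freeze the PMU first */
	ret = hw_perf_group_sched_in(leader, cpuctx, ctx);
	perf_enable();
	local_irq_restore(flags);
	return ret;	/* 1 = group scheduled, -EAGAIN = no room, 0 = no ppmu */
}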

/*
* Add an event to the PMU.
* If all events are not already frozen, then we disable and
@@ -748,22 +805,12 @@ static int power_pmu_enable(struct perf_event *event)
cpuhw->event[n0] = event;
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;

/*
* If group events scheduling transaction was started,
* skip the schedulability test here; it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
goto nocheck;

if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
event->hw.config = cpuhw->events[n0];

nocheck:
event->hw.config = cpuhw->events[n0];
++cpuhw->n_events;
++cpuhw->n_added;

@@ -849,65 +896,11 @@ static void power_pmu_unthrottle(struct perf_event *event)
local_irq_restore(flags);
}

/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
void power_pmu_start_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
cpuhw->n_txn_start = cpuhw->n_events;
}

/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
void power_pmu_cancel_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
}

/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 on success
*/
int power_pmu_commit_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;

if (!ppmu)
return -EAGAIN;
cpuhw = &__get_cpu_var(cpu_hw_events);
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
if (i < 0)
return -EAGAIN;

for (i = cpuhw->n_txn_start; i < n; ++i)
cpuhw->event[i]->hw.config = cpuhw->events[i];

return 0;
}
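
For context, a hypothetical sketch, not from this tree, of how core code was meant to drive the transaction protocol removed above: start_txn() defers the per-event schedulability test that pmu->enable() would otherwise run, and commit_txn() performs it once for the whole group.

/*
 * Hypothetical driver of the ->start_txn/->commit_txn protocol.
 * A real caller would also undo any events already enabled on the
 * failure path before calling cancel_txn().
 */
static int group_sched_in_txn(const struct pmu *pmu,
			      struct perf_event *leader)
{
	struct perf_event *sub;

	pmu->start_txn(pmu);		/* enables below skip their checks */
	if (pmu->enable(leader))
		goto fail;
	list_for_each_entry(sub, &leader->sibling_list, group_entry)
		if (sub->state != PERF_EVENT_STATE_OFF && pmu->enable(sub))
			goto fail;
	if (!pmu->commit_txn(pmu))	/* one check for the whole group */
		return 0;
fail:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}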

struct pmu power_pmu = {
.enable = power_pmu_enable,
.disable = power_pmu_disable,
.read = power_pmu_read,
.unthrottle = power_pmu_unthrottle,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
};

/*
45 changes: 16 additions & 29 deletions trunk/kernel/perf_event.c
@@ -255,18 +255,6 @@ static void update_event_times(struct perf_event *event)
event->total_time_running = run_end - event->tstamp_running;
}

/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
static void update_group_times(struct perf_event *leader)
{
struct perf_event *event;

update_event_times(leader);
list_for_each_entry(event, &leader->sibling_list, group_entry)
update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
@@ -320,6 +308,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_event *sibling, *tmp;

if (list_empty(&event->group_entry))
return;
ctx->nr_events--;
@@ -332,7 +322,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
if (event->group_leader != event)
event->group_leader->nr_siblings--;

update_group_times(event);
update_event_times(event);

/*
* If event was in error state, then keep it
@@ -343,12 +333,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
*/
if (event->state > PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_OFF;
}

static void
perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_event *sibling, *tmp;

/*
* If this was a group event with sibling events then
@@ -513,6 +497,18 @@ static void perf_event_remove_from_context(struct perf_event *event)
raw_spin_unlock_irq(&ctx->lock);
}

/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
static void update_group_times(struct perf_event *leader)
{
struct perf_event *event;

update_event_times(leader);
list_for_each_entry(event, &leader->sibling_list, group_entry)
update_event_times(event);
}

/*
* Cross CPU call to disable a performance event
*/
@@ -1872,12 +1868,6 @@ int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;

/*
* Remove from the PMU, can't get re-enabled since we got
* here because the last ref went.
*/
perf_event_disable(event);

WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
@@ -1892,10 +1882,7 @@ int perf_event_release_kernel(struct perf_event *event)
* to trigger the AB-BA case.
*/
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
raw_spin_lock_irq(&ctx->lock);
list_del_event(event, ctx);
perf_destroy_group(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
perf_event_remove_from_context(event);
mutex_unlock(&ctx->mutex);

mutex_lock(&event->owner->perf_event_mutex);
5 changes: 1 addition & 4 deletions trunk/tools/perf/builtin-report.c
@@ -301,10 +301,7 @@ static int __cmd_report(void)
hists__collapse_resort(hists);
hists__output_resort(hists);
if (use_browser)
perf_session__browse_hists(&hists->entries,
hists->nr_entries,
hists->stats.total, help,
input_name);
hists__browse(hists, help, input_name);
else {
if (rb_first(&session->hists.entries) ==
rb_last(&session->hists.entries))
59 changes: 59 additions & 0 deletions trunk/tools/perf/util/hist.c
@@ -784,3 +784,62 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,

return ret;
}

enum hist_filter {
HIST_FILTER__DSO,
HIST_FILTER__THREAD,
};

void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
struct rb_node *nd;

self->nr_entries = self->stats.total = 0;
self->max_sym_namelen = 0;

for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

if (symbol_conf.exclude_other && !h->parent)
continue;

if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
h->filtered |= (1 << HIST_FILTER__DSO);
continue;
}

h->filtered &= ~(1 << HIST_FILTER__DSO);
if (!h->filtered) {
++self->nr_entries;
self->stats.total += h->count;
if (h->ms.sym &&
self->max_sym_namelen < h->ms.sym->namelen)
self->max_sym_namelen = h->ms.sym->namelen;
}
}
}

void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
struct rb_node *nd;

self->nr_entries = self->stats.total = 0;
self->max_sym_namelen = 0;

for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

if (thread != NULL && h->thread != thread) {
h->filtered |= (1 << HIST_FILTER__THREAD);
continue;
}
h->filtered &= ~(1 << HIST_FILTER__THREAD);
if (!h->filtered) {
++self->nr_entries;
self->stats.total += h->count;
if (h->ms.sym &&
self->max_sym_namelen < h->ms.sym->namelen)
self->max_sym_namelen = h->ms.sym->namelen;
}
}
}
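
As a usage sketch, hypothetical and not in this commit: a report path could apply a DSO filter and print the narrowed tree. hists__fprintf() is the printer declared next to these filters in hist.h; report_one_dso() is an invented helper.

/*
 * Hypothetical helper (needs <stdio.h>): show only entries whose map
 * points at one DSO, then lift the filter again.
 */
static void report_one_dso(struct hists *hists, const struct dso *dso)
{
	hists__filter_by_dso(hists, dso);	/* non-matching entries get flagged */
	hists__fprintf(hists, NULL, false, stdout);
	hists__filter_by_dso(hists, NULL);	/* dso == NULL clears the flag */
}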
15 changes: 15 additions & 0 deletions trunk/tools/perf/util/hist.h
@@ -44,4 +44,19 @@ void hists__output_resort(struct hists *self);
void hists__collapse_resort(struct hists *self);
size_t hists__fprintf(struct hists *self, struct hists *pair,
bool show_displacement, FILE *fp);

void hists__filter_by_dso(struct hists *self, const struct dso *dso);
void hists__filter_by_thread(struct hists *self, const struct thread *thread);

#ifdef NO_NEWT_SUPPORT
static inline int hists__browse(struct hists *self __used,
const char *helpline __used,
const char *input_name __used)
{
return 0;
}
#else
int hists__browse(struct hists *self, const char *helpline,
const char *input_name);
#endif
#endif /* __PERF_HIST_H */
