perf: Rework the PMU methods

Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.

The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.

This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).

It also allows scheduling a counter without starting it, giving us a
generic frozen state (useful for rotating stopped counters).
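
For reference, this is the shape of the reworked interface as the commit
adds it to include/linux/perf_event.h (that hunk is not among the two
files shown below; only the methods relevant to this change are listed,
and the comments are paraphrased):

struct pmu {
	void (*pmu_enable)	(struct pmu *pmu);
	void (*pmu_disable)	(struct pmu *pmu);
	int  (*event_init)	(struct perf_event *event);

	/* schedule the counter on / off the PMU */
	int  (*add)		(struct perf_event *event, int flags);
	void (*del)		(struct perf_event *event, int flags);

	/* start / stop a counter that is already scheduled */
	void (*start)		(struct perf_event *event, int flags);
	void (*stop)		(struct perf_event *event, int flags);
	void (*read)		(struct perf_event *event);
};

#define PERF_EF_START	0x01	/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */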

The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state:

 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state

 2) We store the counter state and ignore all read/overflow events
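
The stopped/up-to-date bookkeeping lives in event->hw.state, using the
flags this commit defines in include/linux/perf_event.h; a driver sets
PERF_HES_STOPPED in its stop() method and PERF_HES_UPTODATE once the
hardware count has been folded into event->count (see the alpha and ARM
hunks below):

#define PERF_HES_STOPPED	0x01	/* the counter is stopped  */
#define PERF_HES_UPTODATE	0x02	/* event->count up-to-date */
#define PERF_HES_ARCH		0x04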

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Sep 9, 2010
1 parent fa407f3 commit a4eaf7f
Showing 14 changed files with 576 additions and 331 deletions.
71 changes: 53 additions & 18 deletions arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
 					  new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count  - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come. Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,7 +419,7 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0;
@@ -455,6 +454,10 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
 	local_irq_restore(flags);
 	perf_pmu_enable(event->pmu);
 
@@ -467,7 +470,7 @@ static int alpha_pmu_enable(struct perf_event *event)
  *  - this function is called from outside this module via the pmu struct
  *    returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
@@ -514,13 +517,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= ~(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -671,7 +705,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
 /*
  * Main entry point - enable HW performance counters.
  */
-static void alpha_pmu_pmu_enable(struct pmu *pmu)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -697,7 +731,7 @@ static void alpha_pmu_pmu_enable(struct pmu *pmu)
  * Main entry point - disable HW performance counters.
  */
 
-static void alpha_pmu_pmu_disable(struct pmu *pmu)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -711,13 +745,14 @@ static void alpha_pmu_pmu_disable(struct pmu *pmu)
 }
 
 static struct pmu pmu = {
-	.pmu_enable	= alpha_pmu_pmu_enable,
-	.pmu_disable	= alpha_pmu_pmu_disable,
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
 	.event_init	= alpha_pmu_event_init,
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
 	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
 };
 
 
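To make the calling convention concrete, here is a minimal sketch of how
core code is expected to drive the new methods; the wrapper names are
hypothetical, and only the pmu methods and PERF_EF_* flags come from this
commit:

/* Hypothetical wrappers, for illustration only. */
static void sched_in_event(struct perf_event *event)
{
	/* schedule the counter on the PMU and start it in one call */
	event->pmu->add(event, PERF_EF_START);
}

static void throttle_event(struct perf_event *event)
{
	/* stop counting but stay scheduled; PERF_EF_UPDATE folds the
	 * hardware count into event->count before stopping */
	event->pmu->stop(event, PERF_EF_UPDATE);
}

static void unthrottle_event(struct perf_event *event)
{
	/* PERF_EF_RELOAD reprograms the sampling period before the
	 * counter restarts; it requires an up-to-date count */
	event->pmu->start(event, PERF_EF_RELOAD);
}
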
96 changes: 61 additions & 35 deletions arch/arm/kernel/perf_event.c
@@ -221,46 +221,56 @@ armpmu_event_update(struct perf_event *event,
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	WARN_ON(idx < 0);
-
-	clear_bit(idx, cpuc->active_mask);
-	armpmu->disable(hwc, idx);
-
-	barrier();
 
-	armpmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	/* Don't read disabled counters! */
+	if (hwc->idx < 0)
+		return;
 
-	perf_event_update_userpage(event);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
-	/* Don't read disabled counters! */
-	if (hwc->idx < 0)
+	if (!armpmu)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx);
+	/*
+	 * ARM pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in armpmu_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		armpmu->disable(hwc, hwc->idx);
+		barrier(); /* why? */
+		armpmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
+	if (!armpmu)
+		return;
+
+	/*
+	 * ARM pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
 	/*
 	 * Set the period again. Some counters can't be stopped, so when we
-	 * were throttled we simply disabled the IRQ source and the counter
+	 * were stopped we simply disabled the IRQ source and the counter
 	 * may have been left counting. If we don't do this step then we may
 	 * get an interrupt too soon or *way* too late if the overflow has
 	 * happened since disabling.
@@ -269,8 +279,25 @@ armpmu_unthrottle(struct perf_event *event)
 	armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	WARN_ON(idx < 0);
+
+	clear_bit(idx, cpuc->active_mask);
+	armpmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
@@ -295,11 +322,9 @@ armpmu_enable(struct perf_event *event)
 	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);
 
-	/* Set the period for the event. */
-	armpmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	armpmu->enable(hwc, idx);
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		armpmu_start(event, PERF_EF_RELOAD);
 
 	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
@@ -534,7 +559,7 @@ static int armpmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static void armpmu_pmu_enable(struct pmu *pmu)
+static void armpmu_enable(struct pmu *pmu)
 {
 	/* Enable all of the perf events on hardware. */
 	int idx;
@@ -555,20 +580,21 @@ static void armpmu_pmu_enable(struct pmu *pmu)
 	armpmu->start();
 }
 
-static void armpmu_pmu_disable(struct pmu *pmu)
+static void armpmu_disable(struct pmu *pmu)
 {
 	if (armpmu)
 		armpmu->stop();
 }
 
 static struct pmu pmu = {
-	.pmu_enable = armpmu_pmu_enable,
-	.pmu_disable= armpmu_pmu_disable,
-	.event_init = armpmu_event_init,
-	.enable	    = armpmu_enable,
-	.disable    = armpmu_disable,
-	.unthrottle = armpmu_unthrottle,
-	.read	    = armpmu_read,
+	.pmu_enable	= armpmu_enable,
+	.pmu_disable	= armpmu_disable,
+	.event_init	= armpmu_event_init,
+	.add		= armpmu_add,
+	.del		= armpmu_del,
+	.start		= armpmu_start,
+	.stop		= armpmu_stop,
+	.read		= armpmu_read,
 };
 
 /*
(diff for the remaining 12 changed files not loaded)
