Commit
---
r: 211818
b: refs/heads/master
c: 33696fc
h: refs/heads/master
v: v3
Peter Zijlstra authored and Ingo Molnar committed Sep 9, 2010
1 parent f756f7b commit 8cda9c0
Showing 10 changed files with 120 additions and 100 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7
+refs/heads/master: 33696fc0d141bbbcb12f75b69608ea83282e3117
30 changes: 16 additions & 14 deletions trunk/arch/alpha/kernel/perf_event.c
@@ -435,7 +435,7 @@ static int alpha_pmu_enable(struct perf_event *event)
         * nevertheless we disable the PMCs first to enable a potential
         * final PMI to occur before we disable interrupts.
         */
-        perf_disable();
+        perf_pmu_disable(event->pmu);
        local_irq_save(flags);
 
        /* Default to error to be returned */
@@ -456,7 +456,7 @@ static int alpha_pmu_enable(struct perf_event *event)
        }
 
        local_irq_restore(flags);
-        perf_enable();
+        perf_pmu_enable(event->pmu);
 
        return ret;
 }
@@ -474,7 +474,7 @@ static void alpha_pmu_disable(struct perf_event *event)
        unsigned long flags;
        int j;
 
-        perf_disable();
+        perf_pmu_disable(event->pmu);
        local_irq_save(flags);
 
        for (j = 0; j < cpuc->n_events; j++) {
@@ -502,7 +502,7 @@ static void alpha_pmu_disable(struct perf_event *event)
        }
 
        local_irq_restore(flags);
-        perf_enable();
+        perf_pmu_enable(event->pmu);
 }
 
 
@@ -668,18 +668,10 @@ static int alpha_pmu_event_init(struct perf_event *event)
        return err;
 }
 
-static struct pmu pmu = {
-        .event_init = alpha_pmu_event_init,
-        .enable = alpha_pmu_enable,
-        .disable = alpha_pmu_disable,
-        .read = alpha_pmu_read,
-        .unthrottle = alpha_pmu_unthrottle,
-};
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -705,7 +697,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -718,6 +710,16 @@ void hw_perf_disable(void)
        wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+        .pmu_enable = alpha_pmu_pmu_enable,
+        .pmu_disable = alpha_pmu_pmu_disable,
+        .event_init = alpha_pmu_event_init,
+        .enable = alpha_pmu_enable,
+        .disable = alpha_pmu_disable,
+        .read = alpha_pmu_read,
+        .unthrottle = alpha_pmu_unthrottle,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
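
Every converted call site above leans on two new core helpers, perf_pmu_disable() and perf_pmu_enable(), added to kernel/perf_event.c (one of the 10 changed files, not rendered on this page). A minimal sketch of their intended semantics, inferred from the call sites rather than quoted from the commit: calls nest through a per-CPU count, and the PMU's pmu_disable()/pmu_enable() callbacks fire only on the outermost pair.

void perf_pmu_disable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);

        /* Only the 0 -> 1 transition touches the hardware. */
        if (!(*count)++)
                pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);

        /* Only the matching 1 -> 0 transition re-enables it. */
        if (!--(*count))
                pmu->pmu_enable(pmu);
}

This is why alpha_pmu_enable(), for example, may call perf_pmu_disable(event->pmu) even when the core has already disabled that PMU: the inner pair only bumps the count.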
28 changes: 14 additions & 14 deletions trunk/arch/arm/kernel/perf_event.c
@@ -277,7 +277,7 @@ armpmu_enable(struct perf_event *event)
        int idx;
        int err = 0;
 
-        perf_disable();
+        perf_pmu_disable(event->pmu);
 
        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
@@ -305,7 +305,7 @@ armpmu_enable(struct perf_event *event)
        perf_event_update_userpage(event);
 
 out:
-        perf_enable();
+        perf_pmu_enable(event->pmu);
        return err;
 }
 
@@ -534,16 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
        return err;
 }
 
-static struct pmu pmu = {
-        .event_init = armpmu_event_init,
-        .enable = armpmu_enable,
-        .disable = armpmu_disable,
-        .unthrottle = armpmu_unthrottle,
-        .read = armpmu_read,
-};
-
-void
-hw_perf_enable(void)
+static void armpmu_pmu_enable(struct pmu *pmu)
 {
        /* Enable all of the perf events on hardware. */
        int idx;
@@ -564,13 +555,22 @@ hw_perf_enable(void)
                armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_pmu_disable(struct pmu *pmu)
 {
        if (armpmu)
                armpmu->stop();
 }
 
+static struct pmu pmu = {
+        .pmu_enable = armpmu_pmu_enable,
+        .pmu_disable= armpmu_pmu_disable,
+        .event_init = armpmu_event_init,
+        .enable = armpmu_enable,
+        .disable = armpmu_disable,
+        .unthrottle = armpmu_unthrottle,
+        .read = armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
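
These per-architecture struct pmu initializers only compile if struct pmu itself gains the new members; that change lands in include/linux/perf_event.h, which is among the changed files this page does not render. A hedged sketch of the additions, with field names inferred from the initializers above and from the helpers sketched earlier:

struct pmu {
        /* ... existing fields ... */

        /* Added by this commit (names inferred): the per-CPU nesting count
         * used by perf_pmu_disable()/perf_pmu_enable(), plus the two
         * whole-PMU callbacks that replace hw_perf_enable/disable. */
        int __percpu *pmu_disable_count;
        void (*pmu_enable)(struct pmu *pmu);
        void (*pmu_disable)(struct pmu *pmu);

        int (*event_init)(struct perf_event *event);
        /* ... enable/disable/read/unthrottle ops as initialized above ... */
};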
24 changes: 13 additions & 11 deletions trunk/arch/powerpc/kernel/perf_event.c
@@ -517,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void power_pmu_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -565,7 +565,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void power_pmu_pmu_enable(struct pmu *pmu)
 {
        struct perf_event *event;
        struct cpu_hw_events *cpuhw;
@@ -735,7 +735,7 @@ static int power_pmu_enable(struct perf_event *event)
        int ret = -EAGAIN;
 
        local_irq_save(flags);
-        perf_disable();
+        perf_pmu_disable(event->pmu);
 
        /*
         * Add the event to the list (if there is room)
@@ -769,7 +769,7 @@ static int power_pmu_enable(struct perf_event *event)
 
        ret = 0;
  out:
-        perf_enable();
+        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
        return ret;
 }
@@ -784,7 +784,7 @@ static void power_pmu_disable(struct perf_event *event)
        unsigned long flags;
 
        local_irq_save(flags);
-        perf_disable();
+        perf_pmu_disable(event->pmu);
 
        power_pmu_read(event);
 
@@ -821,7 +821,7 @@ static void power_pmu_disable(struct perf_event *event)
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }
 
-        perf_enable();
+        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
@@ -837,7 +837,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
        if (!event->hw.idx || !event->hw.sample_period)
                return;
        local_irq_save(flags);
-        perf_disable();
+        perf_pmu_disable(event->pmu);
        power_pmu_read(event);
        left = event->hw.sample_period;
        event->hw.last_period = left;
@@ -848,7 +848,7 @@ static void power_pmu_unthrottle(struct perf_event *event)
        local64_set(&event->hw.prev_count, val);
        local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
-        perf_enable();
+        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
@@ -861,7 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-        perf_disable();
+        perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -876,7 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
-        perf_enable();
+        perf_pmu_enable(pmu);
 }
 
 /*
@@ -903,7 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
-        perf_enable();
+        perf_pmu_enable(pmu);
        return 0;
 }
 
@@ -1131,6 +1131,8 @@ static int power_pmu_event_init(struct perf_event *event)
 }
 
 struct pmu power_pmu = {
+        .pmu_enable = power_pmu_pmu_enable,
+        .pmu_disable = power_pmu_pmu_disable,
        .event_init = power_pmu_event_init,
        .enable = power_pmu_enable,
        .disable = power_pmu_disable,
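
Note how the powerpc transaction hooks now nest: power_pmu_start_txn() calls perf_pmu_disable(pmu), power_pmu_enable() (invoked for each event added inside the transaction) wraps itself in another disable/enable pair, and commit/cancel release the outer level. The per-CPU count is what makes this safe. A small self-contained C model of that rule (illustrative names only, not kernel code); it prints exactly one hardware disable/enable pair despite two nested levels:

#include <stdio.h>

/* Model of the nesting rule: hardware is touched only on the
 * 0->1 and 1->0 transitions of a (per-CPU, here global) count. */
static int pmu_disable_count;

static void hw_disable(void) { puts("hw: disable"); }
static void hw_enable(void)  { puts("hw: enable"); }

static void pmu_disable(void)
{
        if (!pmu_disable_count++)       /* outermost caller only */
                hw_disable();
}

static void pmu_enable(void)
{
        if (!--pmu_disable_count)       /* matching outermost release */
                hw_enable();
}

int main(void)
{
        pmu_disable();  /* start_txn: prints "hw: disable" */
        pmu_disable();  /* nested enable path: count only */
        pmu_enable();   /* inner release: count only */
        pmu_enable();   /* commit_txn: prints "hw: enable" */
        return 0;
}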
18 changes: 10 additions & 8 deletions trunk/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -177,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
  * Disable all events to prevent PMU interrupts and to allow
  * events to be added or removed.
  */
-void hw_perf_disable(void)
+static void fsl_emb_pmu_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -216,7 +216,7 @@ void hw_perf_disable(void)
  * If we were previously disabled and events were added, then
  * put the new config on the PMU.
  */
-void hw_perf_enable(void)
+static void fsl_emb_pmu_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw;
        unsigned long flags;
@@ -271,7 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        u64 val;
        int i;
 
-        perf_disable();
+        perf_pmu_disable(event->pmu);
        cpuhw = &get_cpu_var(cpu_hw_events);
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -311,7 +311,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        ret = 0;
  out:
        put_cpu_var(cpu_hw_events);
-        perf_enable();
+        perf_pmu_enable(event->pmu);
        return ret;
 }
 
@@ -321,7 +321,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;
 
-        perf_disable();
+        perf_pmu_disable(event->pmu);
        if (i < 0)
                goto out;
 
@@ -349,7 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
        cpuhw->n_events--;
 
  out:
-        perf_enable();
+        perf_pmu_enable(event->pmu);
        put_cpu_var(cpu_hw_events);
 }
 
@@ -367,7 +367,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;
        local_irq_save(flags);
-        perf_disable();
+        perf_pmu_disable(event->pmu);
        fsl_emb_pmu_read(event);
        left = event->hw.sample_period;
        event->hw.last_period = left;
@@ -378,7 +378,7 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
        local64_set(&event->hw.prev_count, val);
        local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
-        perf_enable();
+        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
 }
 
@@ -524,6 +524,8 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu fsl_emb_pmu = {
+        .pmu_enable = fsl_emb_pmu_pmu_enable,
+        .pmu_disable = fsl_emb_pmu_pmu_disable,
        .event_init = fsl_emb_pmu_event_init,
        .enable = fsl_emb_pmu_enable,
        .disable = fsl_emb_pmu_disable,
(The diffs for the remaining changed files are not rendered on this page.)