Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 147307
b: refs/heads/master
c: 7595d63
h: refs/heads/master
i:
  147305: b8c3774
  147303: c4566f7
v: v3
  • Loading branch information
Paul Mackerras authored and Ingo Molnar committed Apr 6, 2009
1 parent e2d2a6c commit eb64b76
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 3c1ba6fafecaed295017881f8863a18602f32c1d
refs/heads/master: 7595d63b3a9ce65d14c4fbd0e7de448a343d7215
47 changes: 42 additions & 5 deletions trunk/arch/powerpc/kernel/perf_counter.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,8 @@ struct power_pmu *ppmu;
*/
static unsigned int freeze_counters_kernel = MMCR0_FCS;

/* Forward declaration: interrupt handler registered with reserve_pmc_hardware(). */
static void perf_counter_interrupt(struct pt_regs *regs);

/*
 * Stub: no PMU debug-state dump is implemented for powerpc yet.
 * Kept so the generic perf_counter core can call it unconditionally.
 */
void perf_counter_print_debug(void)
{
}
Expand Down Expand Up @@ -594,13 +596,32 @@ struct hw_perf_counter_ops power_perf_ops = {
.read = power_perf_read
};

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Counter tear-down callback: drop our reference on the PMU and
 * release the PMC hardware when the last perf_counter goes away.
 *
 * Fast path: atomic_add_unless(&num_counters, -1, 1) decrements the
 * count lock-free and returns non-zero as long as the old value was
 * not 1 -- i.e. we were not (possibly) the last user.  Only when the
 * count might drop to zero do we take pmc_reserve_mutex, so that the
 * release cannot race with a concurrent reserve in
 * hw_perf_counter_init().
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
if (!atomic_add_unless(&num_counters, -1, 1)) {
/* Possibly the last counter: serialize against reserve/release. */
mutex_lock(&pmc_reserve_mutex);
/* Re-check under the mutex; only the thread that hits 0 releases. */
if (atomic_dec_return(&num_counters) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
unsigned long ev;
struct perf_counter *ctrs[MAX_HWCOUNTERS];
unsigned int events[MAX_HWCOUNTERS];
int n;
int err;

if (!ppmu)
return NULL;
Expand Down Expand Up @@ -646,6 +667,27 @@ hw_perf_counter_init(struct perf_counter *counter)

counter->hw.config = events[n];
atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);

/*
* See if we need to reserve the PMU.
* If no counters are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_counters)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_counters) == 0 &&
reserve_pmc_hardware(perf_counter_interrupt))
err = -EBUSY;
else
atomic_inc(&num_counters);
mutex_unlock(&pmc_reserve_mutex);
}
counter->destroy = hw_perf_counter_destroy;

if (err)
return NULL;
return &power_perf_ops;
}

Expand Down Expand Up @@ -769,11 +811,6 @@ static int init_perf_counters(void)
{
unsigned long pvr;

if (reserve_pmc_hardware(perf_counter_interrupt)) {
printk(KERN_ERR "Couldn't init performance monitor subsystem\n");
return -EBUSY;
}

/* XXX should get this from cputable */
pvr = mfspr(SPRN_PVR);
switch (PVR_VER(pvr)) {
Expand Down

0 comments on commit eb64b76

Please sign in to comment.