Commit a5018eb

yaml
---
r: 147381
b: refs/heads/master
c: 4aeb0b4
h: refs/heads/master
i:
  147379: 00be2a4
v: v3
Robert Richter authored and Ingo Molnar committed Apr 29, 2009
1 parent 29ffd69 commit a5018eb
Showing 5 changed files with 67 additions and 74 deletions.
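
The patch is a mechanical rename: the per-architecture counter callback table struct hw_perf_counter_ops becomes struct pmu, and the hw_ops pointer in struct perf_counter becomes pmu. In outline (condensed from the perf_counter.h hunks below; the three callbacks themselves are unchanged):

struct pmu {	/* was: struct hw_perf_counter_ops */
	int (*enable) (struct perf_counter *counter);
	void (*disable) (struct perf_counter *counter);
	void (*read) (struct perf_counter *counter);
};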
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 527e26af3741a2168986d8b82653ffe173891324
+refs/heads/master: 4aeb0b4239bb3b67ed402cb9cef3e000c892cadf
25 changes: 12 additions & 13 deletions trunk/arch/powerpc/kernel/perf_counter.c
@@ -256,7 +256,7 @@ static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new)
 	return 0;
 }
 
-static void power_perf_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_counter *counter)
 {
 	long val, delta, prev;
 
@@ -405,7 +405,7 @@ void hw_perf_restore(u64 disable)
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
 		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
-			power_perf_read(counter);
+			power_pmu_read(counter);
 			write_pmc(counter->hw.idx, 0);
 			counter->hw.idx = 0;
 		}
@@ -477,7 +477,7 @@ static void counter_sched_in(struct perf_counter *counter, int cpu)
 	counter->oncpu = cpu;
 	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
 	if (is_software_counter(counter))
-		counter->hw_ops->enable(counter);
+		counter->pmu->enable(counter);
 }
 
 /*
@@ -533,7 +533,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
  * re-enable the PMU in order to get hw_perf_restore to do the
  * actual work of reconfiguring the PMU.
  */
-static int power_perf_enable(struct perf_counter *counter)
+static int power_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	unsigned long flags;
@@ -573,7 +573,7 @@ static int power_perf_enable(struct perf_counter *counter)
 /*
  * Remove a counter from the PMU.
  */
-static void power_perf_disable(struct perf_counter *counter)
+static void power_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuhw;
 	long i;
@@ -583,7 +583,7 @@ static void power_perf_disable(struct perf_counter *counter)
 	local_irq_save(flags);
 	pmudis = hw_perf_save_disable();
 
-	power_perf_read(counter);
+	power_pmu_read(counter);
 
 	cpuhw = &__get_cpu_var(cpu_hw_counters);
 	for (i = 0; i < cpuhw->n_counters; ++i) {
@@ -607,10 +607,10 @@ static void power_perf_disable(struct perf_counter *counter)
 	local_irq_restore(flags);
 }
 
-struct hw_perf_counter_ops power_perf_ops = {
-	.enable = power_perf_enable,
-	.disable = power_perf_disable,
-	.read = power_perf_read
+struct pmu power_pmu = {
+	.enable = power_pmu_enable,
+	.disable = power_pmu_disable,
+	.read = power_pmu_read,
 };
 
 /* Number of perf_counters counting hardware events */
@@ -631,8 +631,7 @@ static void hw_perf_counter_destroy(struct perf_counter *counter)
 	}
 }
 
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	unsigned long ev;
 	struct perf_counter *ctrs[MAX_HWCOUNTERS];
@@ -705,7 +704,7 @@ hw_perf_counter_init(struct perf_counter *counter)
 
 	if (err)
 		return ERR_PTR(err);
-	return &power_perf_ops;
+	return &power_pmu;
 }
 
 /*
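
On powerpc the rename is uniform: every power_perf_* function becomes power_pmu_*, and the exported ops table becomes struct pmu power_pmu, which hw_perf_counter_init() hands back to the core. A condensed sketch of the backend pattern these hunks leave behind (bodies elided, not the real implementations):

static int power_pmu_enable(struct perf_counter *counter)
{
	/* ... find and program a hardware counter slot (full logic elided) ... */
	return 0;
}

static void power_pmu_disable(struct perf_counter *counter)
{
	/* ... remove the counter from the PMU (full logic elided) ... */
}

static void power_pmu_read(struct perf_counter *counter)
{
	/* ... accumulate the hardware delta into the counter (elided) ... */
}

struct pmu power_pmu = {
	.enable = power_pmu_enable,
	.disable = power_pmu_disable,
	.read = power_pmu_read,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	/* ... event validation elided ... */
	return &power_pmu;	/* the core stores this in counter->pmu */
}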
37 changes: 18 additions & 19 deletions trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -515,8 +515,8 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__pmc_generic_disable(struct perf_counter *counter,
-		      struct hw_perf_counter *hwc, unsigned int idx)
+__x86_pmu_disable(struct perf_counter *counter,
+		  struct hw_perf_counter *hwc, unsigned int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_disable(counter, hwc, idx);
@@ -591,8 +591,8 @@ __pmc_fixed_enable(struct perf_counter *counter,
 }
 
 static void
-__pmc_generic_enable(struct perf_counter *counter,
-		     struct hw_perf_counter *hwc, int idx)
+__x86_pmu_enable(struct perf_counter *counter,
+		 struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
 		__pmc_fixed_enable(counter, hwc, idx);
@@ -626,7 +626,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static int pmc_generic_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -667,7 +667,7 @@ static int pmc_generic_enable(struct perf_counter *counter)
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	/*
@@ -676,7 +676,7 @@ static int pmc_generic_enable(struct perf_counter *counter)
 	barrier();
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
-	__pmc_generic_enable(counter, hwc, idx);
+	__x86_pmu_enable(counter, hwc, idx);
 
 	return 0;
 }
@@ -731,13 +731,13 @@ void perf_counter_print_debug(void)
 	local_irq_enable();
 }
 
-static void pmc_generic_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	__pmc_generic_disable(counter, hwc, idx);
+	__x86_pmu_disable(counter, hwc, idx);
 
 	clear_bit(idx, cpuc->used);
 	cpuc->counters[idx] = NULL;
@@ -767,7 +767,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__hw_perf_counter_set_period(counter, hwc, idx);
 
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-		__pmc_generic_enable(counter, hwc, idx);
+		__x86_pmu_enable(counter, hwc, idx);
 }
 
 /*
@@ -805,7 +805,7 @@ static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 
 		perf_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__pmc_generic_disable(counter, &counter->hw, bit);
+			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
 
 	hw_perf_ack_status(ack);
@@ -1034,27 +1034,26 @@ void __init init_hw_perf_counters(void)
 	register_die_notifier(&perf_counter_nmi_notifier);
 }
 
-static void pmc_generic_read(struct perf_counter *counter)
+static void x86_pmu_read(struct perf_counter *counter)
 {
 	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
 }
 
-static const struct hw_perf_counter_ops x86_perf_counter_ops = {
-	.enable = pmc_generic_enable,
-	.disable = pmc_generic_disable,
-	.read = pmc_generic_read,
+static const struct pmu pmu = {
+	.enable = x86_pmu_enable,
+	.disable = x86_pmu_disable,
+	.read = x86_pmu_read,
 };
 
-const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 {
 	int err;
 
 	err = __hw_perf_counter_init(counter);
 	if (err)
 		return ERR_PTR(err);
 
-	return &x86_perf_counter_ops;
+	return &pmu;
 }
 
 /*
9 changes: 4 additions & 5 deletions trunk/include/linux/perf_counter.h
@@ -334,9 +334,9 @@ struct hw_perf_counter {
 struct perf_counter;
 
 /**
- * struct hw_perf_counter_ops - performance counter hw ops
+ * struct pmu - generic performance monitoring unit
  */
-struct hw_perf_counter_ops {
+struct pmu {
 	int (*enable) (struct perf_counter *counter);
 	void (*disable) (struct perf_counter *counter);
 	void (*read) (struct perf_counter *counter);
@@ -381,7 +381,7 @@ struct perf_counter {
 	struct list_head sibling_list;
 	int nr_siblings;
 	struct perf_counter *group_leader;
-	const struct hw_perf_counter_ops *hw_ops;
+	const struct pmu *pmu;
 
 	enum perf_counter_active_state state;
 	enum perf_counter_active_state prev_state;
@@ -519,8 +519,7 @@ struct perf_cpu_context {
  */
 extern int perf_max_counters;
 
-extern const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter);
+extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
 
 extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
 extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
[diff for the fifth changed file was not loaded on the original page]
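
With the member renamed, generic code reaches the architecture backend through counter->pmu instead of counter->hw_ops, as the counter_sched_in() hunk above shows. A minimal, self-contained illustration of that dispatch, using a hypothetical stub backend rather than real kernel code:

#include <stdio.h>

struct perf_counter;	/* forward declaration, as in perf_counter.h */

struct pmu {	/* same three callbacks as the renamed kernel struct */
	int (*enable) (struct perf_counter *counter);
	void (*disable) (struct perf_counter *counter);
	void (*read) (struct perf_counter *counter);
};

struct perf_counter {	/* reduced to the fields this example needs */
	const struct pmu *pmu;	/* was: const struct hw_perf_counter_ops *hw_ops */
	long long count;
};

/* Hypothetical stub backend, for illustration only. */
static int stub_enable(struct perf_counter *counter)
{
	counter->count = 0;	/* pretend to start a hardware counter */
	return 0;
}

static void stub_disable(struct perf_counter *counter)
{
	(void)counter;		/* pretend to stop the hardware counter */
}

static void stub_read(struct perf_counter *counter)
{
	printf("count=%lld\n", counter->count);
}

static const struct pmu stub_pmu = {
	.enable = stub_enable,
	.disable = stub_disable,
	.read = stub_read,
};

int main(void)
{
	struct perf_counter counter = { .pmu = &stub_pmu };

	counter.pmu->enable(&counter);	/* formerly counter.hw_ops->enable() */
	counter.pmu->read(&counter);
	counter.pmu->disable(&counter);
	return 0;
}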
