Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 147467
b: refs/heads/master
c: 0bbd0d4
h: refs/heads/master
i:
  147465: dd17eb9
  147463: b51e829
v: v3
  • Loading branch information
Paul Mackerras authored and Ingo Molnar committed May 15, 2009
1 parent c0d1875 commit cb4689b
Show file tree
Hide file tree
Showing 6 changed files with 98 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 9d23a90a67261e73b2fcac04d8ca963c6b496afb
refs/heads/master: 0bbd0d4be8d5d3676c126e06e3c75c16def00441
14 changes: 13 additions & 1 deletion trunk/arch/powerpc/include/asm/perf_counter.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,20 +30,32 @@ struct power_pmu {
u64 alt[]);
void (*disable_pmc)(unsigned int pmc, u64 mmcr[]);
int (*limited_pmc_event)(u64 event);
int limited_pmc5_6; /* PMC5 and PMC6 have limited function */
u32 flags;
int n_generic;
int *generic_events;
};

extern struct power_pmu *ppmu;

/*
* Values for power_pmu.flags
*/
#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */

/*
* Values for flags to get_alternatives()
*/
#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */

struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);

/*
* The power_pmu.get_constraint function returns a 64-bit value and
* a 64-bit mask that express the constraints between this event and
Expand Down
2 changes: 2 additions & 0 deletions trunk/arch/powerpc/include/asm/reg.h
Original file line number Diff line number Diff line change
Expand Up @@ -492,11 +492,13 @@
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
#define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */
#define MMCRA_SLOT_SHIFT 24
#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */
#define POWER6_MMCRA_SIHV 0x0000040000000000ULL
#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
#define POWER6_MMCRA_THRM 0x00000020UL
Expand Down
84 changes: 80 additions & 4 deletions trunk/arch/powerpc/kernel/perf_counter.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
int n_counters;
Expand Down Expand Up @@ -310,7 +311,8 @@ static void power_pmu_read(struct perf_counter *counter)
*/
/*
 * Report whether the given PMC number is one of the limited-function
 * counters (PMC5/PMC6) on CPUs that have them.  Limited PMCs can only
 * count a restricted set of events and cannot generate interrupts.
 */
static int is_limited_pmc(int pmcnum)
{
	if (!(ppmu->flags & PPMU_LIMITED_PMC5_6))
		return 0;
	return pmcnum == 5 || pmcnum == 6;
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
Expand Down Expand Up @@ -860,7 +862,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
* If this machine has limited counters, check whether this
* event could go on a limited counter.
*/
if (ppmu->limited_pmc5_6) {
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
if (can_go_on_limited_pmc(counter, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
Expand Down Expand Up @@ -933,6 +935,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
u64 period = counter->hw.irq_period;
s64 prev, delta, left;
int record = 0;
u64 addr, mmcra, sdsync;

/* we don't have to worry about interrupts here */
prev = atomic64_read(&counter->hw.prev_count);
Expand Down Expand Up @@ -963,8 +966,76 @@ static void record_and_restart(struct perf_counter *counter, long val,
/*
* Finally record data if requested.
*/
if (record)
perf_counter_overflow(counter, nmi, regs, 0);
if (record) {
addr = 0;
if (counter->hw_event.record_type & PERF_RECORD_ADDR) {
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling,
* give them the SDAR (sampled data address).
* If we are doing instruction sampling, then only
* give them the SDAR if it corresponds to the
* instruction pointed to by SIAR; this is indicated
* by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
*/
mmcra = regs->dsisr;
sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
addr = mfspr(SPRN_SDAR);
}
perf_counter_overflow(counter, nmi, regs, addr);
}
}

/*
* Called from generic code to get the misc flags (i.e. processor mode)
* for an event.
*/
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra, sihv, sipr;

	if (TRAP(regs) != 0xf00) {
		/* Not a PMU interrupt; classify from the interrupted MSR. */
		if (user_mode(regs))
			return PERF_EVENT_MISC_USER;
		return PERF_EVENT_MISC_KERNEL;
	}

	/*
	 * MMCRA was stashed in regs->dsisr by the PMU interrupt handler.
	 * POWER6 keeps the SIHV/SIPR bits at an alternate position,
	 * indicated by PPMU_ALT_SIPR.
	 */
	mmcra = regs->dsisr;
	if (ppmu->flags & PPMU_ALT_SIPR) {
		sihv = POWER6_MMCRA_SIHV;
		sipr = POWER6_MMCRA_SIPR;
	} else {
		sihv = MMCRA_SIHV;
		sipr = MMCRA_SIPR;
	}

	if (mmcra & sihv)
		return PERF_EVENT_MISC_HYPERVISOR;
	if (mmcra & sipr)
		return PERF_EVENT_MISC_USER;
	return PERF_EVENT_MISC_KERNEL;
}

/*
* Called from generic code to get the instruction pointer
* for an event.
*/
/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long mmcra, slot, ip;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	/* MMCRA was stashed in regs->dsisr by the PMU interrupt handler. */
	mmcra = regs->dsisr;
	ip = mfspr(SPRN_SIAR);

	/*
	 * When instruction sampling is enabled (and the CPU uses the
	 * standard MMCRA layout), SIAR points at the start of the sampled
	 * dispatch group; the SLOT field selects the instruction within
	 * the group, 4 bytes per slot.
	 */
	if (!(ppmu->flags & PPMU_ALT_SIPR) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			ip += 4 * (slot - 1);
	}
	return ip;
}

/*
Expand All @@ -983,6 +1054,11 @@ static void perf_counter_interrupt(struct pt_regs *regs)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));

/*
* Overload regs->dsisr to store MMCRA so we only need to read it once.
*/
regs->dsisr = mfspr(SPRN_MMCRA);

/*
* If interrupts were soft-disabled when this PMU interrupt
* occurred, treat it as an NMI.
Expand Down
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/power5+-pmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -625,6 +625,6 @@ struct power_pmu power5p_pmu = {
.disable_pmc = power5p_disable_pmc,
.n_generic = ARRAY_SIZE(power5p_generic_events),
.generic_events = power5p_generic_events,
.limited_pmc5_6 = 1,
.flags = PPMU_LIMITED_PMC5_6,
.limited_pmc_event = power5p_limited_pmc_event,
};
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/power6-pmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -485,6 +485,6 @@ struct power_pmu power6_pmu = {
.disable_pmc = p6_disable_pmc,
.n_generic = ARRAY_SIZE(power6_generic_events),
.generic_events = power6_generic_events,
.limited_pmc5_6 = 1,
.flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
.limited_pmc_event = p6_limited_pmc_event,
};

0 comments on commit cb4689b

Please sign in to comment.