powerpc/perf: move perf irq/nmi handling details into traps.c
This is required to allow more significant differences between
NMI-type interrupt handlers and regular asynchronous handlers.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-20-npiggin@gmail.com
Nicholas Piggin authored and Michael Ellerman committed Feb 8, 2021
1 parent 3a31388 commit 156b537
Showing 3 changed files with 32 additions and 59 deletions.
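
The hunks below move the NMI-vs-async decision out of the perf drivers and into the trap handler, which commits to an interrupt context before calling perf_irq(). As a quick illustration of the dispatch shape, here is a minimal user-space C sketch (not kernel code: pt_regs, arch_irq_disabled_regs(), and the enter/exit helpers are stand-in stubs for the kernel primitives):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's register snapshot; only the soft-mask
 * state matters for this sketch. */
struct pt_regs { unsigned long softe; };

/* Stub: in the kernel this tests the saved soft-mask state. */
static bool arch_irq_disabled_regs(struct pt_regs *regs)
{
        return regs->softe != 0;
}

/* Stubs standing in for the kernel's context-entry primitives. */
static void nmi_enter(void) { puts("nmi_enter"); }
static void nmi_exit(void)  { puts("nmi_exit"); }
static void irq_enter(void) { puts("irq_enter"); }
static void irq_exit(void)  { puts("irq_exit"); }

/* Stub for the registered PMU handler. */
static void perf_irq(struct pt_regs *regs)
{
        (void)regs;
        puts("perf_irq");
}

/* Shape of the new traps.c flow: decide NMI vs. async once, then run
 * the same perf handler inside the chosen context. */
static void performance_monitor_exception(struct pt_regs *regs)
{
        if (arch_irq_disabled_regs(regs)) {
                nmi_enter();
                perf_irq(regs);
                nmi_exit();
        } else {
                irq_enter();
                perf_irq(regs);
                irq_exit();
        }
}

int main(void)
{
        struct pt_regs soft_masked = { .softe = 1 };
        struct pt_regs unmasked = { .softe = 0 };

        performance_monitor_exception(&soft_masked); /* NMI path */
        performance_monitor_exception(&unmasked);    /* async path */
        return 0;
}

Built with a plain cc invocation, the first call prints the nmi_enter/nmi_exit bracketing and the second the irq_enter/irq_exit bracketing, mirroring the two paths the real handler selects between.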
31 changes: 30 additions & 1 deletion arch/powerpc/kernel/traps.c
@@ -1892,11 +1892,40 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-void performance_monitor_exception(struct pt_regs *regs)
+static void performance_monitor_exception_nmi(struct pt_regs *regs)
+{
+        nmi_enter();
+
+        __this_cpu_inc(irq_stat.pmu_irqs);
+
+        perf_irq(regs);
+
+        nmi_exit();
+}
+
+static void performance_monitor_exception_async(struct pt_regs *regs)
 {
+        irq_enter();
+
         __this_cpu_inc(irq_stat.pmu_irqs);
 
         perf_irq(regs);
+
+        irq_exit();
 }
+
+void performance_monitor_exception(struct pt_regs *regs)
+{
+        /*
+         * On 64-bit, if perf interrupts hit in a local_irq_disable
+         * (soft-masked) region, we consider them as NMIs. This is required to
+         * prevent hash faults on user addresses when reading callchains (and
+         * looks better from an irq tracing perspective).
+         */
+        if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+                performance_monitor_exception_nmi(regs);
+        else
+                performance_monitor_exception_async(regs);
+}
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
35 changes: 2 additions & 33 deletions arch/powerpc/perf/core-book3s.c
@@ -110,10 +110,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
 {
         regs->result = 0;
 }
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-        return 0;
-}
 
 static inline int siar_valid(struct pt_regs *regs)
 {
@@ -353,15 +349,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
         regs->result = use_siar;
 }
 
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-        return (regs->softe & IRQS_DISABLED);
-}
-
 /*
  * On processors like P7+ that have the SIAR-Valid bit, marked instructions
  * must be sampled only if the SIAR-valid bit is set.
@@ -2279,26 +2266,13 @@ static void __perf_event_interrupt(struct pt_regs *regs)
         struct perf_event *event;
         unsigned long val[8];
         int found, active;
-        int nmi;
 
         if (cpuhw->n_limited)
                 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
                                         mfspr(SPRN_PMC6));
 
         perf_read_regs(regs);
 
-        /*
-         * If perf interrupts hit in a local_irq_disable (soft-masked) region,
-         * we consider them as NMIs. This is required to prevent hash faults on
-         * user addresses when reading callchains. See the NMI test in
-         * do_hash_page.
-         */
-        nmi = perf_intr_is_nmi(regs);
-        if (nmi)
-                nmi_enter();
-        else
-                irq_enter();
-
         /* Read all the PMCs since we'll need them a bunch of times */
         for (i = 0; i < ppmu->n_counter; ++i)
                 val[i] = read_pmc(i + 1);
@@ -2344,8 +2318,8 @@ static void __perf_event_interrupt(struct pt_regs *regs)
                         }
                 }
         }
-        if (!found && !nmi && printk_ratelimit())
-                printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
+        if (unlikely(!found) && !arch_irq_disabled_regs(regs))
+                printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
         /*
          * Reset MMCR0 to its normal value. This will set PMXE and
@@ -2355,11 +2329,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
          * we get back out of this interrupt.
          */
         write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);
-
-        if (nmi)
-                nmi_exit();
-        else
-                irq_exit();
 }
 
 static void perf_event_interrupt(struct pt_regs *regs)
25 changes: 0 additions & 25 deletions arch/powerpc/perf/core-fsl-emb.c
@@ -31,19 +31,6 @@ static atomic_t num_events;
 /* Used to avoid races in calling reserve/release_pmc_hardware */
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-#ifdef __powerpc64__
-        return (regs->softe & IRQS_DISABLED);
-#else
-        return 0;
-#endif
-}
-
 static void perf_event_interrupt(struct pt_regs *regs);
 
 /*
@@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
         struct perf_event *event;
         unsigned long val;
         int found = 0;
-        int nmi;
-
-        nmi = perf_intr_is_nmi(regs);
-        if (nmi)
-                nmi_enter();
-        else
-                irq_enter();
 
         for (i = 0; i < ppmu->n_counter; ++i) {
                 event = cpuhw->event[i];
@@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
         mtmsr(mfmsr() | MSR_PMM);
         mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
         isync();
-
-        if (nmi)
-                nmi_exit();
-        else
-                irq_exit();
 }
 
 void hw_perf_event_setup(int cpu)
