perf_counter, x86: make interrupt handler model specific

This separates the perfcounter interrupt handler for AMD and Intel
cpus. The AMD interrupt handler implementation is a follow-on patch.

[ Impact: refactor and clean up code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-9-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Robert Richter authored and Ingo Molnar committed Apr 29, 2009
1 parent 5f4ec28 commit 39d81ea
Showing 1 changed file with 13 additions and 3 deletions: arch/x86/kernel/cpu/perf_counter.c
@@ -4,6 +4,7 @@
  * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2009 Jaswinder Singh Rajput
+ * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
  *
  * For licencing details see kernel-base/COPYING
  */
@@ -47,6 +48,7 @@ struct cpu_hw_counters {
  * struct x86_pmu - generic x86 pmu
  */
 struct x86_pmu {
+	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
 	u64		(*get_status)(u64);
@@ -241,6 +243,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;
 
+	/* disable temporarily */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return -ENOSYS;
+
 	if (unlikely(!perf_counters_initialized))
 		return -EINVAL;
 
@@ -780,7 +786,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
  */
-static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
+static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
 	int bit, cpu = smp_processor_id();
 	u64 ack, status;
@@ -827,6 +833,8 @@ static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	return ret;
 }
 
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }
+
 void perf_counter_unthrottle(void)
 {
 	struct cpu_hw_counters *cpuc;
@@ -851,7 +859,7 @@ void smp_perf_counter_interrupt(struct pt_regs *regs)
 	irq_enter();
 	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
 	ack_APIC_irq();
-	__smp_perf_counter_interrupt(regs, 0);
+	x86_pmu->handle_irq(regs, 0);
 	irq_exit();
 }
 
@@ -908,7 +916,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
 	regs = args->regs;
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	ret = __smp_perf_counter_interrupt(regs, 1);
+	ret = x86_pmu->handle_irq(regs, 1);
 
 	return ret ? NOTIFY_STOP : NOTIFY_OK;
 }
@@ -920,6 +928,7 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 };
 
 static struct x86_pmu intel_pmu = {
+	.handle_irq		= intel_pmu_handle_irq,
 	.save_disable_all	= intel_pmu_save_disable_all,
 	.restore_all		= intel_pmu_restore_all,
 	.get_status		= intel_pmu_get_status,
@@ -934,6 +943,7 @@ static struct x86_pmu intel_pmu = {
 };
 
 static struct x86_pmu amd_pmu = {
+	.handle_irq		= amd_pmu_handle_irq,
 	.save_disable_all	= amd_pmu_save_disable_all,
 	.restore_all		= amd_pmu_restore_all,
 	.get_status		= amd_pmu_get_status,

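The core of the patch is the new handle_irq function pointer in struct x86_pmu: the generic APIC and NMI entry points now dispatch through it instead of calling one hard-coded handler, so Intel keeps its existing implementation and AMD can plug in its own in the follow-on patch. The standalone C sketch below illustrates only that dispatch pattern under stated assumptions; it is ordinary userspace code, not kernel code. The struct and handler names mirror the diff, while pt_regs, enum vendor, pmu_init() and main() are simplified stand-ins invented here for illustration.

/*
 * Minimal sketch of the model-specific dispatch pattern: a per-vendor
 * handler is selected once at init time and the generic interrupt path
 * only calls through the pointer.  Illustrative userspace code.
 */
#include <stdio.h>

struct pt_regs { unsigned long ip; };	/* placeholder for the real register frame */

struct x86_pmu {
	int (*handle_irq)(struct pt_regs *, int);
};

static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	printf("intel handler: ip=%#lx nmi=%d\n", regs->ip, nmi);
	return 1;	/* overflow handled */
}

/* Mirrors the stub in the diff; the real AMD handler comes in a later patch. */
static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	(void)regs;
	(void)nmi;
	return 0;
}

static struct x86_pmu intel_pmu = { .handle_irq = intel_pmu_handle_irq };
static struct x86_pmu amd_pmu   = { .handle_irq = amd_pmu_handle_irq };

/* Selected once during init, analogous to the x86_pmu pointer in the diff. */
static struct x86_pmu *x86_pmu;

enum vendor { VENDOR_INTEL, VENDOR_AMD };

static void pmu_init(enum vendor v)
{
	x86_pmu = (v == VENDOR_AMD) ? &amd_pmu : &intel_pmu;
}

/* Generic entry point: model-agnostic, it only forwards to the selected handler. */
static int perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	return x86_pmu->handle_irq(regs, nmi);
}

int main(void)
{
	struct pt_regs regs = { .ip = 0x1000 };

	pmu_init(VENDOR_INTEL);
	perf_counter_interrupt(&regs, 0);	/* dispatches to intel_pmu_handle_irq */

	pmu_init(VENDOR_AMD);
	perf_counter_interrupt(&regs, 1);	/* dispatches to the AMD stub */
	return 0;
}

This split keeps the interrupt plumbing (APIC ack, irq_enter/irq_exit, the NMI notifier) in one model-agnostic place, while everything that inspects vendor-specific counter state lives behind the pointer.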