Commit 370022f
---
r: 147399
b: refs/heads/master
c: a29aa8a
h: refs/heads/master
i:
  147397: ea804b1
  147395: c874b35
  147391: 63ffa2e
v: v3
Robert Richter authored and Ingo Molnar committed Apr 29, 2009
1 parent 3a44c85 commit 370022f
Showing 2 changed files with 38 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 85cf9dba92152bb4edec118b2f4f0be1ae7fdcab
+refs/heads/master: a29aa8a7ff93e4196d558036928597e68337dd8d
45 changes: 37 additions & 8 deletions trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -240,10 +240,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;
 
-	/* disable temporarily */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		return -ENOSYS;
-
 	if (!x86_pmu_initialized())
 		return -ENODEV;
 
@@ -773,7 +769,43 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	return ret;
 }
 
-static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
+{
+	int cpu = smp_processor_id();
+	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+	u64 val;
+	int handled = 0;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx;
+
+	++cpuc->interrupts;
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active))
+			continue;
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+		x86_perf_counter_update(counter, hwc, idx);
+		val = atomic64_read(&hwc->prev_count);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+		/* counter overflow */
+		x86_perf_counter_set_period(counter, hwc, idx);
+		handled = 1;
+		inc_irq_stat(apic_perf_irqs);
+		if (perf_counter_overflow(counter, nmi, regs, 0))
+			amd_pmu_disable_counter(hwc, idx);
+		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+			/*
+			 * do not reenable when throttled, but reload
+			 * the register
+			 */
+			amd_pmu_disable_counter(hwc, idx);
+		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+			amd_pmu_enable_counter(hwc, idx);
+	}
+	return handled;
+}
 
 void perf_counter_unthrottle(void)
 {
@@ -782,9 +814,6 @@ void perf_counter_unthrottle(void)
 	if (!x86_pmu_initialized())
 		return;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-		return;
-
 	cpuc = &__get_cpu_var(cpu_hw_counters);
 	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
 		if (printk_ratelimit())
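A note on the overflow test in the new amd_pmu_handle_irq() above: x86_perf_counter_set_period() reloads each counter with a negative start value, so while a counter is still counting toward its period the top bit of the counter_bits-wide value stored back into hwc->prev_count stays set; once that bit is clear the counter has wrapped past zero and is treated as overflowed. The following userspace sketch reproduces just that bit test outside the kernel; the 48-bit width is a stand-in assumption for x86_pmu.counter_bits, not something this commit fixes.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical counter width; in the kernel this is x86_pmu.counter_bits. */
#define COUNTER_BITS 48

/*
 * Mirrors the test in amd_pmu_handle_irq(): the counter starts from a
 * negative value, so a set top bit means "still counting", and a clear
 * top bit means the counter wrapped past zero, i.e. it overflowed.
 */
static int counter_overflowed(uint64_t val)
{
	return !(val & (1ULL << (COUNTER_BITS - 1)));
}

int main(void)
{
	uint64_t still_counting = (1ULL << (COUNTER_BITS - 1)) | 0x1234; /* top bit set */
	uint64_t wrapped        = 0x42;                                  /* top bit clear */

	printf("still counting -> overflowed: %d\n", counter_overflowed(still_counting));
	printf("wrapped        -> overflowed: %d\n", counter_overflowed(wrapped));
	return 0;
}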
