
Commit e2c26fe

---
yaml
---
r: 147607
b: refs/heads/master
c: e4abb5d
h: refs/heads/master
i:
  147605: 46424d8
  147603: fc6e47c
  147599: ac19616
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jun 2, 2009
1 parent fc8367d commit e2c26fe
Showing 2 changed files with 23 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8a016db386195b193e2a8aeddff9fe937dcb7a40
+refs/heads/master: e4abb5d4f7ddabc1fc7c392cf0a10d8e5868c9ca
31 changes: 22 additions & 9 deletions trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -287,8 +287,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
         if (!hwc->sample_period)
                 hwc->sample_period = x86_pmu.max_period;
 
-        atomic64_set(&hwc->period_left,
-                        min(x86_pmu.max_period, hwc->sample_period));
+        atomic64_set(&hwc->period_left, hwc->sample_period);
 
         /*
          * Raw event type provide the config in the event structure
@@ -451,32 +450,37 @@ static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
  * Set the next IRQ period, based on the hwc->period_left value.
  * To be called with the counter disabled in hw:
  */
-static void
+static int
 x86_perf_counter_set_period(struct perf_counter *counter,
                             struct hw_perf_counter *hwc, int idx)
 {
         s64 left = atomic64_read(&hwc->period_left);
-        s64 period = min(x86_pmu.max_period, hwc->sample_period);
-        int err;
+        s64 period = hwc->sample_period;
+        int err, ret = 0;
 
         /*
          * If we are way outside a reasoable range then just skip forward:
          */
         if (unlikely(left <= -period)) {
                 left = period;
                 atomic64_set(&hwc->period_left, left);
+                ret = 1;
         }
 
         if (unlikely(left <= 0)) {
                 left += period;
                 atomic64_set(&hwc->period_left, left);
+                ret = 1;
         }
         /*
          * Quirk: certain CPUs dont like it if just 1 event is left:
          */
         if (unlikely(left < 2))
                 left = 2;
 
+        if (left > x86_pmu.max_period)
+                left = x86_pmu.max_period;
+
         per_cpu(prev_left[idx], smp_processor_id()) = left;
 
         /*
@@ -487,6 +491,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 
         err = checking_wrmsrl(hwc->counter_base + idx,
                              (u64)(-left) & x86_pmu.counter_mask);
+
+        return ret;
 }
 
 static inline void
@@ -706,16 +712,19 @@ static void x86_pmu_disable(struct perf_counter *counter)
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
  */
-static void intel_pmu_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_counter *counter)
 {
         struct hw_perf_counter *hwc = &counter->hw;
         int idx = hwc->idx;
+        int ret;
 
         x86_perf_counter_update(counter, hwc, idx);
-        x86_perf_counter_set_period(counter, hwc, idx);
+        ret = x86_perf_counter_set_period(counter, hwc, idx);
 
         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                 intel_pmu_enable_counter(hwc, idx);
+
+        return ret;
 }
 
 static void intel_pmu_reset(void)
@@ -782,7 +791,9 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
                 if (!test_bit(bit, cpuc->active_mask))
                         continue;
 
-                intel_pmu_save_and_restart(counter);
+                if (!intel_pmu_save_and_restart(counter))
+                        continue;
+
                 if (perf_counter_overflow(counter, nmi, regs, 0))
                         intel_pmu_disable_counter(&counter->hw, bit);
         }
@@ -824,9 +835,11 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
                         continue;
 
                 /* counter overflow */
-                x86_perf_counter_set_period(counter, hwc, idx);
                 handled = 1;
                 inc_irq_stat(apic_perf_irqs);
+                if (!x86_perf_counter_set_period(counter, hwc, idx))
+                        continue;
+
                 if (perf_counter_overflow(counter, nmi, regs, 0))
                         amd_pmu_disable_counter(hwc, idx);
         }
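The change above emulates sample periods larger than the hardware maximum: x86_perf_counter_set_period() now clamps what it programs into the counter to x86_pmu.max_period, keeps the full remainder in period_left, and returns 1 only when a complete software period has elapsed, and the Intel and AMD NMI handlers call perf_counter_overflow() only in that case. The user-space sketch below illustrates that bookkeeping; it is not kernel code. MAX_PERIOD, struct counter, set_period() and handle_irq() are invented stand-ins, and the "skip forward" path for a wildly negative period_left is omitted.

/*
 * Toy model of period splitting: a 200000-event sample period is consumed
 * in chunks of at most MAX_PERIOD (2^16) "hardware" events, and a sample
 * is reported only when the full software period has elapsed.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MAX_PERIOD ((int64_t)1 << 16)   /* pretend hardware limit */

struct counter {
        int64_t sample_period;          /* requested software period         */
        int64_t period_left;            /* events still owed for this period */
        int64_t hw_left;                /* chunk last programmed into "hw"   */
};

/* Returns 1 when a new software period starts (mirroring the new return
 * value of x86_perf_counter_set_period()), 0 while still mid-period. */
static int set_period(struct counter *c)
{
        int64_t left = c->period_left;
        int ret = 0;

        if (left <= 0) {                /* previous period fully consumed */
                left += c->sample_period;
                c->period_left = left;
                ret = 1;
        }
        if (left > MAX_PERIOD)          /* clamp to hardware capability */
                left = MAX_PERIOD;

        c->hw_left = left;              /* stand-in for programming the PMC */
        return ret;
}

/* Simulated overflow interrupt: account the chunk, reprogram, and only
 * report a sample when set_period() says the software period wrapped. */
static void handle_irq(struct counter *c)
{
        c->period_left -= c->hw_left;   /* what the counter-update step does */

        if (!set_period(c))
                return;                 /* partial chunk: no sample yet */

        printf("  -> sample: full period of %" PRId64 " events elapsed\n",
               c->sample_period);
}

int main(void)
{
        struct counter c = { .sample_period = 200000, .period_left = 200000 };
        int irq;

        set_period(&c);                 /* initial programming */
        for (irq = 1; irq <= 8; irq++) {
                printf("irq %d: hw chunk %" PRId64 ", period_left %" PRId64 "\n",
                       irq, c.hw_left, c.period_left);
                handle_irq(&c);
        }
        return 0;
}

Built with any C99 compiler, this prints one "sample" line every fourth simulated interrupt (3 x 65536 + 3392 = 200000 events), which is the behaviour the patch gives real counters whose sample_period exceeds max_period.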
