perf_counter: Accurate period data
We currently log hw.sample_period for PERF_SAMPLE_PERIOD; however, this
is incorrect. When we adjust the period, the new value only takes
effect on the next cycle, yet we report it for the current cycle. So
when we adjust the period on every cycle, the logged value is always
wrong.

Solve this by keeping track of the last_period.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Jun 11, 2009
1 parent df1a132 commit 9e350de
Showing 4 changed files with 28 additions and 11 deletions.
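
To make the failure mode concrete, here is a minimal user-space C sketch (illustration only; the struct hw_counter type and the overflow() helper are hypothetical stand-ins, not kernel code) of why a sample must be logged with the period the completed cycle actually ran with:

/* period_sketch.c: hypothetical illustration, not kernel code.
 * Build with: cc period_sketch.c -o period_sketch */
#include <stdio.h>

struct hw_counter {
        unsigned long long sample_period;  /* period to program next */
        unsigned long long last_period;    /* period the in-flight cycle uses */
};

/* Hypothetical overflow handler: the cycle that just ended ran with
 * last_period, so that is what a PERF_SAMPLE_PERIOD record must carry.
 * Logging sample_period instead would report a value that only takes
 * effect on the next cycle. */
static void overflow(struct hw_counter *hw)
{
        printf("sample logged with period %llu\n", hw->last_period);

        /* reprogram the counter for the next cycle and remember what
         * we actually programmed */
        hw->last_period = hw->sample_period;
}

int main(void)
{
        struct hw_counter hw = { .sample_period = 1000, .last_period = 1000 };

        overflow(&hw);           /* logs 1000 */
        hw.sample_period = 2000; /* period re-tuned while a cycle is in flight */
        overflow(&hw);           /* logs 1000: the finished cycle used the old period */
        overflow(&hw);           /* logs 2000: the new period is now in effect */
        return 0;
}

With the bug, the second overflow would have reported 2000 even though the cycle it describes ran with a period of 1000; tracking last_period in each PMU backend, as the diff below does, is what makes the logged value accurate.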
9 changes: 6 additions & 3 deletions arch/powerpc/kernel/perf_counter.c
@@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	perf_disable();
 	power_pmu_read(counter);
 	left = counter->hw.sample_period;
+	counter->hw.last_period = left;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
+	counter->hw.last_period = counter->hw.sample_period;
+	atomic64_set(&counter->hw.period_left, counter->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	if (record) {
 		struct perf_sample_data data = {
-			.regs = regs,
-			.addr = 0,
+			.regs   = regs,
+			.addr   = 0,
+			.period = counter->hw.last_period,
 		};
 
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
15 changes: 12 additions & 3 deletions arch/x86/kernel/cpu/perf_counter.c
@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
+		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 	/*
@@ -1257,16 +1260,22 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
 
-		/* counter overflow */
-		handled = 1;
-		inc_irq_stat(apic_perf_irqs);
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
 		if (perf_counter_overflow(counter, 1, &data))
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
 	return handled;
 }
 
6 changes: 4 additions & 2 deletions include/linux/perf_counter.h
@@ -366,6 +366,7 @@ struct hw_perf_counter {
 	};
 	atomic64_t		prev_count;
 	u64			sample_period;
+	u64			last_period;
 	atomic64_t		period_left;
 	u64			interrupts;
 
@@ -606,8 +607,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
 struct perf_sample_data {
-	struct pt_regs *regs;
-	u64 addr;
+	struct pt_regs		*regs;
+	u64			addr;
+	u64			period;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
9 changes: 6 additions & 3 deletions kernel/perf_counter.c
@@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		perf_output_put(&handle, cpu_entry);
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
-		perf_output_put(&handle, counter->hw.sample_period);
+		perf_output_put(&handle, data->period);
 
 	/*
 	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_add(period, &hwc->period_left);
+		hwc->last_period = period;
 	}
 
 	atomic64_set(&hwc->prev_count, -left);
@@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct pt_regs *regs, u64 addr)
 {
 	struct perf_sample_data data = {
-		.regs = regs,
-		.addr = addr,
+		.regs   = regs,
+		.addr   = addr,
+		.period = counter->hw.last_period,
 	};
 
 	perf_swcounter_update(counter);
