
Commit 25aad16

---
r: 147718
b: refs/heads/master
c: 9e350de
h: refs/heads/master
v: v3
Peter Zijlstra authored and Ingo Molnar committed Jun 11, 2009
1 parent 3110cbc commit 25aad16
Showing 5 changed files with 29 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: df1a132bf3d3508f863336c80a27806a2ac947e0
refs/heads/master: 9e350de37ac9607012fcf9c5314a28fbddf8f43c
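
(Bookkeeping for this history-mirror repository: the recorded upstream head advances from df1a132… to 9e350de…, matching the c: field in the commit message above.)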
9 changes: 6 additions & 3 deletions trunk/arch/powerpc/kernel/perf_counter.c
@@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
perf_disable();
power_pmu_read(counter);
left = counter->hw.sample_period;
counter->hw.last_period = left;
val = 0;
if (left < 0x80000000L)
val = 0x80000000L - left;
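
For context: the powerpc PMCs interrupt when the most-significant bit of the 32-bit counter becomes set, so programming val = 0x80000000 - left arms the counter to overflow after left more events. Unthrottling re-arms with a full sample_period, and the added line records that value as last_period so the next sample reports the period actually in effect.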
@@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)

counter->hw.config = events[n];
counter->hw.counter_base = cflags[n];
atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
counter->hw.last_period = counter->hw.sample_period;
atomic64_set(&counter->hw.period_left, counter->hw.last_period);

/*
* See if we need to reserve the PMU.
@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
*/
if (record) {
struct perf_sample_data data = {
.regs = regs,
.addr = 0,
.regs = regs,
.addr = 0,
.period = counter->hw.last_period,
};

if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
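
Taken together, the three powerpc hunks follow one pattern: hw.last_period snapshots the period at the moment the counter is (re)armed, and the overflow path copies that snapshot into perf_sample_data rather than reading the live sample_period, which may have been retuned between arming and overflow. (The repeated .regs/.addr lines in the last hunk are unchanged; only their alignment was adjusted for the new .period member.) A minimal user-space sketch of the idea — plain C with made-up names, not the kernel code:

#include <stdint.h>
#include <stdio.h>

struct hw_counter {
	uint64_t sample_period;	/* period to use at the *next* arm */
	uint64_t last_period;	/* period the counter is armed with */
	int64_t  period_left;	/* events remaining until overflow */
};

/* Arming the counter is where last_period gets snapshotted. */
static void counter_arm(struct hw_counter *hwc)
{
	hwc->period_left = (int64_t)hwc->sample_period;
	hwc->last_period = hwc->sample_period;
}

/* The overflow handler reports the period that produced the sample. */
static void counter_overflow(struct hw_counter *hwc)
{
	printf("sample: period=%llu\n",
	       (unsigned long long)hwc->last_period);
	counter_arm(hwc);	/* re-arm with the current period */
}

int main(void)
{
	struct hw_counter hwc = { .sample_period = 1000 };

	counter_arm(&hwc);
	hwc.sample_period = 2000;	/* retune before the overflow fires */
	counter_overflow(&hwc);		/* still reports 1000 */
	counter_overflow(&hwc);		/* the next sample reports 2000 */
	return 0;
}

Without last_period, the first sample above would claim a period of 2000 even though it was generated by a period of 1000.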
15 changes: 12 additions & 3 deletions trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)

if (!hwc->sample_period) {
hwc->sample_period = x86_pmu.max_period;
hwc->last_period = hwc->sample_period;
atomic64_set(&hwc->period_left, hwc->sample_period);
}
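
Note that this init path stamps last_period only when sample_period was defaulted to x86_pmu.max_period; for a user-supplied period, last_period appears to be set the first time the counter is armed in x86_perf_counter_set_period below.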

@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
if (unlikely(left <= -period)) {
left = period;
atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}

if (unlikely(left <= 0)) {
left += period;
atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
@@ -1257,16 +1260,22 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
if (val & (1ULL << (x86_pmu.counter_bits - 1)))
continue;

/* counter overflow */
handled = 1;
inc_irq_stat(apic_perf_irqs);
/*
* counter overflow
*/
handled = 1;
data.period = counter->hw.last_period;

if (!x86_perf_counter_set_period(counter, hwc, idx))
continue;

if (perf_counter_overflow(counter, 1, &data))
amd_pmu_disable_counter(hwc, idx);
}

if (handled)
inc_irq_stat(apic_perf_irqs);

return handled;
}
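
Stepping back to the middle hunk, the re-arm logic in x86_perf_counter_set_period has two fix-up cases, and a new period begins in both — which is exactly when last_period must be refreshed. Stripped of the kernel plumbing, the shape is roughly this (a standalone sketch, not the kernel source):

#include <stdint.h>

/* Returns 1 when a fresh period begins, i.e. an overflow was consumed. */
int set_period(int64_t *period_left, uint64_t *last_period, int64_t period)
{
	int64_t left = *period_left;
	int ret = 0;

	if (left <= -period) {	/* overshot by a full period or more */
		left = period;
		*last_period = (uint64_t)period;
		ret = 1;
	}
	if (left <= 0) {	/* partway into the next period */
		left += period;
		*last_period = (uint64_t)period;
		ret = 1;
	}
	*period_left = left;
	return ret;
}

Independently of the period bookkeeping, the last hunk also moves inc_irq_stat(apic_perf_irqs) out of the per-counter loop: the IRQ statistic is now bumped once per handled interrupt rather than once per overflowed counter.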

6 changes: 4 additions & 2 deletions trunk/include/linux/perf_counter.h
@@ -366,6 +366,7 @@ struct hw_perf_counter {
};
atomic64_t prev_count;
u64 sample_period;
u64 last_period;
atomic64_t period_left;
u64 interrupts;

@@ -606,8 +607,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
extern void perf_counter_update_userpage(struct perf_counter *counter);

struct perf_sample_data {
struct pt_regs *regs;
u64 addr;
struct pt_regs *regs;
u64 addr;
u64 period;
};

extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
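
These are the two interface changes everything above hangs off: struct hw_perf_counter grows last_period next to sample_period, and struct perf_sample_data — which the callers above hand to perf_counter_overflow() — grows a period field so each sample carries the period that produced it. (Here, too, the .regs/.addr lines are unchanged; only their alignment was adjusted for the new member.)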
9 changes: 6 additions & 3 deletions trunk/kernel/perf_counter.c
@@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
perf_output_put(&handle, cpu_entry);

if (sample_type & PERF_SAMPLE_PERIOD)
perf_output_put(&handle, counter->hw.sample_period);
perf_output_put(&handle, data->period);

/*
* XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
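
The hunk above is the consumer side of the change: when PERF_SAMPLE_PERIOD is requested, the output path now emits the period captured with the sample (data->period) instead of the counter's current hw.sample_period, which by the time the sample is written out may already have been adjusted.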
@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
if (unlikely(left <= -period)) {
left = period;
atomic64_set(&hwc->period_left, left);
hwc->last_period = period;
}

if (unlikely(left <= 0)) {
left += period;
atomic64_add(period, &hwc->period_left);
hwc->last_period = period;
}

atomic64_set(&hwc->prev_count, -left);
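
The software-counter re-arm mirrors the x86 logic sketched above, stamping last_period in both fix-up branches. (One pre-existing asymmetry is kept: the first branch writes period_left with atomic64_set() while the second uses atomic64_add().)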
@@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
int nmi, struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data = {
.regs = regs,
.addr = addr,
.regs = regs,
.addr = addr,
.period = counter->hw.last_period,
};

perf_swcounter_update(counter);
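
And this is the final producer: the software-counter overflow path now builds perf_sample_data with .period taken from hw.last_period, so every overflow site touched by this commit — powerpc, x86, and software counters — reports the period that was actually in effect when the sample was generated.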
