---
r: 156616
b: refs/heads/master
c: 7b4b665
h: refs/heads/master
v: v3
---
Peter Zijlstra authored and Ingo Molnar committed Aug 9, 2009
1 parent 8537c7a commit b61e605
Showing 2 changed files with 95 additions and 71 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 46ab976443c6c566c8fe6fc72a6733a55ba9fbea
+refs/heads/master: 7b4b6658e152ed4568cfff48175d93645df081d1
164 changes: 94 additions & 70 deletions trunk/kernel/perf_counter.c
@@ -3344,87 +3344,81 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
  * Generic software counter infrastructure
  */
 
-static void perf_swcounter_update(struct perf_counter *counter)
+/*
+ * We directly increment counter->count and keep a second value in
+ * counter->hw.period_left to count intervals. This period counter
+ * is kept in the range [-sample_period, 0] so that we can use the
+ * sign as trigger.
+ */
+
+static u64 perf_swcounter_set_period(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	u64 prev, now;
-	s64 delta;
+	u64 period = hwc->last_period;
+	u64 nr, offset;
+	s64 old, val;
 
+	hwc->last_period = hwc->sample_period;
+
 again:
-	prev = atomic64_read(&hwc->prev_count);
-	now = atomic64_read(&hwc->count);
-	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
-		goto again;
+	old = val = atomic64_read(&hwc->period_left);
+	if (val < 0)
+		return 0;
 
-	delta = now - prev;
+	nr = div64_u64(period + val, period);
+	offset = nr * period;
+	val -= offset;
+	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+		goto again;
 
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &hwc->period_left);
+	return nr;
 }
 
-static void perf_swcounter_set_period(struct perf_counter *counter)
+static void perf_swcounter_overflow(struct perf_counter *counter,
+				    int nmi, struct perf_sample_data *data)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
+	u64 overflow;
 
-	if (unlikely(left <= -period)) {
-		left = period;
-		atomic64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-	}
+	data->period = counter->hw.last_period;
+	overflow = perf_swcounter_set_period(counter);
 
-	if (unlikely(left <= 0)) {
-		left += period;
-		atomic64_add(period, &hwc->period_left);
-		hwc->last_period = period;
-	}
+	if (hwc->interrupts == MAX_INTERRUPTS)
+		return;
 
-	atomic64_set(&hwc->prev_count, -left);
-	atomic64_set(&hwc->count, -left);
+	for (; overflow; overflow--) {
+		if (perf_counter_overflow(counter, nmi, data)) {
+			/*
+			 * We inhibit the overflow from happening when
+			 * hwc->interrupts == MAX_INTERRUPTS.
+			 */
+			break;
+		}
+	}
 }
 
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+static void perf_swcounter_unthrottle(struct perf_counter *counter)
 {
-	enum hrtimer_restart ret = HRTIMER_RESTART;
-	struct perf_sample_data data;
-	struct perf_counter *counter;
-	u64 period;
-
-	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-	counter->pmu->read(counter);
-
-	data.addr = 0;
-	data.regs = get_irq_regs();
 	/*
-	 * In case we exclude kernel IPs or are somehow not in interrupt
-	 * context, provide the next best thing, the user IP.
+	 * Nothing to do, we already reset hwc->interrupts.
 	 */
-	if ((counter->attr.exclude_kernel || !data.regs) &&
-			!counter->attr.exclude_user)
-		data.regs = task_pt_regs(current);
+}
 
-	if (data.regs) {
-		if (perf_counter_overflow(counter, 0, &data))
-			ret = HRTIMER_NORESTART;
-	}
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+			       int nmi, struct perf_sample_data *data)
+{
+	struct hw_perf_counter *hwc = &counter->hw;
 
-	period = max_t(u64, 10000, counter->hw.sample_period);
-	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+	atomic64_add(nr, &counter->count);
 
-	return ret;
-}
+	if (!hwc->sample_period)
+		return;
 
-static void perf_swcounter_overflow(struct perf_counter *counter,
-				    int nmi, struct perf_sample_data *data)
-{
-	data->period = counter->hw.last_period;
+	if (!data->regs)
+		return;
 
-	perf_swcounter_update(counter);
-	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, data))
-		/* soft-disable the counter */
-		;
+	if (!atomic64_add_negative(nr, &hwc->period_left))
+		perf_swcounter_overflow(counter, nmi, data);
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
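
To see what the new accounting buys, here is a small user-space sketch of the scheme this hunk introduces. Plain integers stand in for the kernel's atomic64_t, printf stands in for perf_counter_overflow(), and the names (struct swcounter, swc_add, swc_set_period) are illustrative rather than kernel API:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model: period_left is kept in [-sample_period, 0]; the
 * moment it goes non-negative, one or more sample periods have elapsed. */
struct swcounter {
	uint64_t count;		/* total event count */
	int64_t  period_left;	/* negative distance to the next sample */
	uint64_t sample_period;
};

/* Returns how many whole periods elapsed and puts period_left back
 * into [-sample_period, 0], mirroring perf_swcounter_set_period(). */
static uint64_t swc_set_period(struct swcounter *c)
{
	uint64_t period = c->sample_period;
	int64_t val = c->period_left;
	uint64_t nr;

	if (val < 0)
		return 0;

	nr = (period + (uint64_t)val) / period;
	c->period_left = val - (int64_t)(nr * period);
	return nr;
}

/* Mirrors perf_swcounter_add(): count the events, and when period_left
 * crosses zero, emit one overflow per elapsed period. */
static void swc_add(struct swcounter *c, uint64_t nr)
{
	c->count += nr;
	c->period_left += (int64_t)nr;
	if (c->period_left >= 0) {
		uint64_t overflow = swc_set_period(c);
		printf("%llu overflow(s), period_left=%lld\n",
		       (unsigned long long)overflow,
		       (long long)c->period_left);
	}
}

int main(void)
{
	struct swcounter c = { 0, -100, 100 };	/* period of 100 events */

	swc_add(&c, 30);	/* -70: no boundary crossed */
	swc_add(&c, 250);	/* 180: crosses two boundaries -> 2 overflows */
	swc_add(&c, 10);	/* -10: no boundary crossed */
	return 0;
}

In the removed code, perf_counter_overflow() ran at most once per trigger, so a fast-moving source that jumps several periods in one update (the 250-event step above) under-reported samples; the new for (; overflow; overflow--) loop emits one per elapsed period.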
@@ -3488,15 +3482,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	return 1;
 }
 
-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-			       int nmi, struct perf_sample_data *data)
-{
-	int neg = atomic64_add_negative(nr, &counter->hw.count);
-
-	if (counter->hw.sample_period && !neg && data->regs)
-		perf_swcounter_overflow(counter, nmi, data);
-}
-
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
 				     u32 event, u64 nr, int nmi,
@@ -3575,26 +3560,65 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,

 static void perf_swcounter_read(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static int perf_swcounter_enable(struct perf_counter *counter)
 {
-	perf_swcounter_set_period(counter);
+	struct hw_perf_counter *hwc = &counter->hw;
+
+	if (hwc->sample_period) {
+		hwc->last_period = hwc->sample_period;
+		perf_swcounter_set_period(counter);
+	}
 	return 0;
 }
 
 static void perf_swcounter_disable(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static const struct pmu perf_ops_generic = {
 	.enable		= perf_swcounter_enable,
 	.disable	= perf_swcounter_disable,
 	.read		= perf_swcounter_read,
+	.unthrottle	= perf_swcounter_unthrottle,
 };
 
+/*
+ * hrtimer based swcounter callback
+ */
+
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
+	struct perf_counter *counter;
+	u64 period;
+
+	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
+	counter->pmu->read(counter);
+
+	data.addr = 0;
+	data.regs = get_irq_regs();
+	/*
+	 * In case we exclude kernel IPs or are somehow not in interrupt
+	 * context, provide the next best thing, the user IP.
+	 */
+	if ((counter->attr.exclude_kernel || !data.regs) &&
+			!counter->attr.exclude_user)
+		data.regs = task_pt_regs(current);
+
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
+			ret = HRTIMER_NORESTART;
+	}
+
+	period = max_t(u64, 10000, counter->hw.sample_period);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+	return ret;
+}
+
 /*
  * Software counter: cpu wall time clock
  */
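
The cmpxchg loop in perf_swcounter_set_period() exists because software counters can fire from NMI context, where one update may interleave with the update it interrupted. Below is a minimal user-space analogue of that retry pattern, with C11 atomics standing in for atomic64_read()/atomic64_cmpxchg(); the function name and the worked numbers are illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t period_left = -100;

/* Retry loop analogous to perf_swcounter_set_period(): recompute from
 * a fresh snapshot whenever the compare-and-swap observes that another
 * context (e.g. an NMI) changed period_left underneath us. */
static uint64_t set_period(uint64_t period)
{
	int64_t old, val;
	uint64_t nr;

again:
	old = val = atomic_load(&period_left);
	if (val < 0)
		return 0;			/* still inside the period */

	nr = (period + (uint64_t)val) / period;	/* periods elapsed */
	val -= (int64_t)(nr * period);		/* back into [-period, 0] */
	if (!atomic_compare_exchange_strong(&period_left, &old, val))
		goto again;			/* lost the race, retry */

	return nr;
}

int main(void)
{
	atomic_fetch_add(&period_left, 350);	/* 350 events arrive */
	printf("overflows: %llu\n", (unsigned long long)set_period(100));
	/* prints 3: the boundaries at 100, 200 and 300 were crossed */
	return 0;
}

Because the winning cmpxchg both consumes the elapsed periods and re-arms the counter in one atomic step, a losing racer simply recomputes against the new state rather than double-counting overflows.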
