diff --git a/[refs] b/[refs]
index a55df4868420..c299f954821b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: db59932f62386cdfd8510c27a83118c5e915e9ea
+refs/heads/master: c6ac1e6edacc7e1fb0405d61f95a797c6a712411
diff --git a/trunk/arch/metag/kernel/perf/perf_event.c b/trunk/arch/metag/kernel/perf/perf_event.c
index a00f527eade5..5bf984feaaa1 100644
--- a/trunk/arch/metag/kernel/perf/perf_event.c
+++ b/trunk/arch/metag/kernel/perf/perf_event.c
@@ -240,8 +240,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
 	if (left > (s64)metag_pmu->max_period)
 		left = metag_pmu->max_period;
 
-	if (metag_pmu->write)
-		metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);
+	if (metag_pmu->write) {
+		local64_set(&hwc->prev_count, -(s32)left);
+		metag_pmu->write(idx, -left & MAX_PERIOD);
+	}
 
 	perf_event_update_userpage(event);
 
@@ -651,6 +653,12 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
 		 * set to a specific value that needs preserving.
 		 */
 		tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+	else
+		/*
+		 * Older cores reset the counter on write, so prev_count needs
+		 * resetting too so we can calculate a correct delta.
+		 */
+		local64_set(&event->prev_count, 0);
 
 	metag_out32(tmp, PERF_COUNT(idx));
 unlock:
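For context, the change above keeps prev_count in sync with whatever value is actually sitting in the hardware counter, because the event count is advanced by the delta between a fresh hardware read and prev_count. Below is a minimal, self-contained C sketch of that bookkeeping, not the metag driver itself: the names fake_hw_event, hw_counter, set_period and update are hypothetical, and MAX_PERIOD is assumed to be a 32-bit counter mask.

/*
 * Minimal sketch, not the actual metag PMU driver. It models why
 * prev_count must track what was last programmed into (or read back
 * from) the hardware counter: the delta folded into the event count
 * is computed against it.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD 0xffffffffULL	/* assumed 32-bit counter mask */

struct fake_hw_event {
	int64_t prev_count;	/* last value programmed or read back */
	uint64_t count;		/* accumulated number of events */
};

static uint32_t hw_counter;	/* stand-in for the hardware register */

/* Program the counter so it overflows after 'left' more events. */
static void set_period(struct fake_hw_event *hwc, int64_t left)
{
	/* As in the patch: remember exactly what we write to hardware. */
	hwc->prev_count = -(int32_t)left;
	hw_counter = (uint32_t)((uint64_t)-left & MAX_PERIOD);
}

/* On read or overflow, fold the hardware delta into the event count. */
static void update(struct fake_hw_event *hwc)
{
	uint64_t new_raw = hw_counter;
	uint64_t delta = (new_raw - (uint64_t)hwc->prev_count) & MAX_PERIOD;

	hwc->prev_count = (int64_t)new_raw;
	hwc->count += delta;
}

int main(void)
{
	struct fake_hw_event hwc = { 0, 0 };

	set_period(&hwc, 100);	/* expect overflow after 100 events */
	hw_counter += 30;	/* pretend 30 events occurred */
	update(&hwc);
	printf("events counted: %llu\n", (unsigned long long)hwc.count);	/* 30 */
	return 0;
}

If prev_count were left stale when the counter is reprogrammed (or not reset to zero on cores that clear the counter on write, as the second hunk handles), the masked subtraction in update() would produce a bogus delta.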