Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 147165
b: refs/heads/master
c: 8fb9331
h: refs/heads/master
i:
  147163: 645bf99
v: v3
  • Loading branch information
Ingo Molnar committed Dec 23, 2008
1 parent 4580181 commit 1f06e1c
Show file tree
Hide file tree
Showing 4 changed files with 1 addition and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 94c46572a6d9bb497eda0a14099d9f1360d57d5d
refs/heads/master: 8fb9331391af95ca1f4e5c0a0da8120b13cbae01
7 changes: 0 additions & 7 deletions trunk/arch/x86/kernel/cpu/perf_counter.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ x86_perf_counter_update(struct perf_counter *counter,
{
u64 prev_raw_count, new_raw_count, delta;

WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE);
/*
* Careful: an NMI might modify the previous counter value.
*
Expand All @@ -89,7 +88,6 @@ x86_perf_counter_update(struct perf_counter *counter,
* of the count, so we do that by clipping the delta to 32 bits:
*/
delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
WARN_ON_ONCE((int)delta < 0);

atomic64_add(delta, &counter->count);
atomic64_sub(delta, &hwc->period_left);
Expand Down Expand Up @@ -193,7 +191,6 @@ __x86_perf_counter_disable(struct perf_counter *counter,
int err;

err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
WARN_ON_ONCE(err);
}

static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
Expand All @@ -209,8 +206,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
s32 left = atomic64_read(&hwc->period_left);
s32 period = hwc->irq_period;

WARN_ON_ONCE(period <= 0);

/*
 * If we are way outside a reasonable range then just skip forward:
*/
Expand All @@ -224,8 +219,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
atomic64_set(&hwc->period_left, left);
}

WARN_ON_ONCE(left <= 0);

per_cpu(prev_left[idx], smp_processor_id()) = left;

/*
Expand Down
4 changes: 0 additions & 4 deletions trunk/include/linux/perf_counter.h
Original file line number Diff line number Diff line change
Expand Up @@ -218,8 +218,6 @@ struct perf_cpu_context {
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern void
perf_counter_show(struct perf_counter *counter, char *str, int trace);
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

Expand All @@ -237,8 +235,6 @@ extern int perf_counter_task_enable(void);

#else
static inline void
perf_counter_show(struct perf_counter *counter, char *str, int trace) { }
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu) { }
Expand Down
8 changes: 0 additions & 8 deletions trunk/kernel/perf_counter.c
Original file line number Diff line number Diff line change
Expand Up @@ -861,8 +861,6 @@ static void task_clock_perf_counter_update(struct perf_counter *counter)
atomic64_set(&counter->hw.prev_count, now);

delta = now - prev;
if (WARN_ON_ONCE(delta < 0))
delta = 0;

atomic64_add(delta, &counter->count);
}
Expand Down Expand Up @@ -906,8 +904,6 @@ static void page_faults_perf_counter_update(struct perf_counter *counter)
atomic64_set(&counter->hw.prev_count, now);

delta = now - prev;
if (WARN_ON_ONCE(delta < 0))
delta = 0;

atomic64_add(delta, &counter->count);
}
Expand Down Expand Up @@ -954,8 +950,6 @@ static void context_switches_perf_counter_update(struct perf_counter *counter)
atomic64_set(&counter->hw.prev_count, now);

delta = now - prev;
if (WARN_ON_ONCE(delta < 0))
delta = 0;

atomic64_add(delta, &counter->count);
}
Expand Down Expand Up @@ -1000,8 +994,6 @@ static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
atomic64_set(&counter->hw.prev_count, now);

delta = now - prev;
if (WARN_ON_ONCE(delta < 0))
delta = 0;

atomic64_add(delta, &counter->count);
}
Expand Down

0 comments on commit 1f06e1c

Please sign in to comment.