Commit 56355bd

Paul Mackerras authored and Ingo Molnar committed Feb 13, 2009
1 parent 7c8b58e commit 56355bd

Showing 3 changed files with 17 additions and 7 deletions.
2 changes: 1 addition & 1 deletion  [refs]

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b1864e9a1afef41709886072c6e6248def0386f4
+refs/heads/master: c07c99b67233ccaad38a961c17405dc1e1542aa4
1 change: 1 addition & 0 deletions  trunk/include/linux/perf_counter.h

@@ -173,6 +173,7 @@ struct perf_counter {
 	const struct hw_perf_counter_ops *hw_ops;

 	enum perf_counter_active_state	state;
+	enum perf_counter_active_state	prev_state;
 	atomic64_t			count;

 	struct perf_counter_hw_event	hw_event;
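For reference, the new prev_state field mirrors state, and the comparisons the patch adds below (prev_state <= PERF_COUNTER_STATE_OFF, state >= PERF_COUNTER_STATE_INACTIVE) rely on the enum's ordering, with OFF below INACTIVE below ACTIVE. A minimal sketch of the assumed ordering (illustrative values, not a verbatim copy of the era's header):

/* Assumed ordering: the patch's comparisons need OFF < INACTIVE < ACTIVE. */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};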
21 changes: 15 additions & 6 deletions trunk/kernel/perf_counter.c
@@ -444,6 +444,7 @@ static void __perf_install_in_context(void *info)

 	list_add_counter(counter, ctx);
 	ctx->nr_counters++;
+	counter->prev_state = PERF_COUNTER_STATE_OFF;

 	/*
 	 * Don't put the counter on if it is disabled or if
@@ -562,6 +563,7 @@ static void __perf_counter_enable(void *info)
 	curr_rq_lock_irq_save(&flags);
 	spin_lock(&ctx->lock);

+	counter->prev_state = counter->state;
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 		goto unlock;
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
@@ -733,13 +735,15 @@ group_sched_in(struct perf_counter *group_counter,
 	if (ret)
 		return ret < 0 ? ret : 0;

+	group_counter->prev_state = group_counter->state;
 	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
 		return -EAGAIN;

 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		counter->prev_state = counter->state;
 		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
 			partial_group = counter;
 			goto group_error;
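The three hunks above follow one pattern: every path that can put a counter into service first records where it came from. __perf_install_in_context() handles a brand-new counter, so it pins prev_state to PERF_COUNTER_STATE_OFF; __perf_counter_enable() and group_sched_in() snapshot the live state before counter_sched_in() flips it to active. That lets each software counter's enable callback, changed below, tell a genuine enable apart from a routine reschedule. A condensed sketch of the flow, with hw_enable() as a hypothetical stand-in for the hw_ops->enable() call made inside counter_sched_in():

/* Illustrative only, not verbatim kernel code. */
static int sched_in_one(struct perf_counter *counter)
{
	counter->prev_state = counter->state;	/* where did we come from? */
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	return hw_enable(counter);		/* may consult prev_state */
}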
@@ -1398,9 +1402,9 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)

 static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
-	u64 now = task_clock_perf_counter_val(counter, 0);
-
-	atomic64_set(&counter->hw.prev_count, now);
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count,
+			     task_clock_perf_counter_val(counter, 0));

 	return 0;
 }
@@ -1455,7 +1459,8 @@ static void page_faults_perf_counter_read(struct perf_counter *counter)

 static int page_faults_perf_counter_enable(struct perf_counter *counter)
 {
-	atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
 	return 0;
 }

@@ -1501,7 +1506,9 @@ static void context_switches_perf_counter_read(struct perf_counter *counter)

 static int context_switches_perf_counter_enable(struct perf_counter *counter)
 {
-	atomic64_set(&counter->hw.prev_count, get_context_switches(counter));
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count,
+			     get_context_switches(counter));
 	return 0;
 }

@@ -1547,7 +1554,9 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter)

 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
 {
-	atomic64_set(&counter->hw.prev_count, get_cpu_migrations(counter));
+	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
+		atomic64_set(&counter->hw.prev_count,
+			     get_cpu_migrations(counter));
 	return 0;
 }

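The four enable hunks make the same change: previously each callback re-took the hw.prev_count baseline unconditionally, on every sched-in; now the baseline is taken only when the counter is coming from OFF, that is, on a genuine enable. The distinction matters because events such as context switches and migrations are charged while the task is off the CPU, so re-baselining at every sched-in left them permanently outside the measured window. A standalone sketch of the resulting behavior, with hypothetical names in place of the kernel's:

#include <stdint.h>

enum state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

struct swcounter {
	enum state	prev_state;	/* maintained by the sched-in paths above */
	uint64_t	prev_count;	/* baseline: source value at last snapshot */
	uint64_t	count;		/* accumulated delta reported to userspace */
};

extern uint64_t read_source(void);	/* e.g. a task's total context switches */

/* After the patch: reset the baseline only on a genuine enable,
 * so events that land while the task is scheduled out still count. */
static void sw_enable(struct swcounter *c)
{
	if (c->prev_state <= STATE_OFF)
		c->prev_count = read_source();
}

/* Read path: fold the delta since the baseline into count. */
static void sw_update(struct swcounter *c)
{
	uint64_t now = read_source();

	c->count += now - c->prev_count;
	c->prev_count = now;
}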
