Commit d2631f2

---
r: 147175
b: refs/heads/master
c: 95cdd2e
h: refs/heads/master
i:
  147173: 14b2bfa
  147171: 1a055cc
  147167: 7ddc48a
v: v3
Ingo Molnar committed Dec 23, 2008
1 parent 637439e commit d2631f2
Showing 4 changed files with 52 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 78b6084c907cea15bb40a564b974e072f5163781
+refs/heads/master: 95cdd2e7851cce79ab839cb0b3cbe68d7911d0f1
6 changes: 5 additions & 1 deletion trunk/arch/x86/kernel/cpu/perf_counter.c
@@ -244,7 +244,7 @@ static int fixed_mode_idx(struct hw_perf_counter *hwc)
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static void pmc_generic_enable(struct perf_counter *counter)
+static int pmc_generic_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -253,6 +253,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
 	/* Try to get the previous counter again */
 	if (test_and_set_bit(idx, cpuc->used)) {
 		idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
+		if (idx == nr_counters_generic)
+			return -EAGAIN;
 		set_bit(idx, cpuc->used);
 		hwc->idx = idx;
 	}
@@ -265,6 +267,8 @@ static void pmc_generic_enable(struct perf_counter *counter)
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
 	__pmc_generic_enable(counter, hwc, idx);
+
+	return 0;
 }
 
 void perf_counter_print_debug(void)
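
The x86 half of the change makes PMC slot allocation fallible: when the counter's previous slot is already taken and find_first_zero_bit() runs off the end of cpuc->used, pmc_generic_enable() now reports -EAGAIN instead of silently clobbering a busy counter. A minimal user-space sketch of the same claim-or-fail pattern, using plain (non-atomic) C in place of the kernel's bitops; NR_COUNTERS_GENERIC and claim_pmc_slot() are illustrative names, not from the commit:

#include <errno.h>
#include <stdio.h>

#define NR_COUNTERS_GENERIC 4	/* illustrative per-CPU generic PMC count */

static unsigned long used_mask;	/* stand-in for cpuc->used */

/* Claim the preferred slot if free, else the first free slot;
 * return the slot index, or -EAGAIN when every slot is busy. */
static int claim_pmc_slot(int idx)
{
	if (used_mask & (1UL << idx)) {		/* test_and_set_bit() found it taken */
		for (idx = 0; idx < NR_COUNTERS_GENERIC; idx++)
			if (!(used_mask & (1UL << idx)))
				break;		/* find_first_zero_bit() */
		if (idx == NR_COUNTERS_GENERIC)
			return -EAGAIN;		/* nothing free: let the caller cope */
	}
	used_mask |= 1UL << idx;		/* set_bit() */
	return idx;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("%d\n", claim_pmc_slot(0));	/* prints 0 1 2 3 -11 -11 */
	return 0;
}

Returning the (negative) error code through the same int as the slot index is what lets the kernel side report failure without widening the API.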
2 changes: 1 addition & 1 deletion trunk/include/linux/perf_counter.h
@@ -128,7 +128,7 @@ struct perf_counter;
  * struct hw_perf_counter_ops - performance counter hw ops
  */
 struct hw_perf_counter_ops {
-	void (*enable)		(struct perf_counter *counter);
+	int (*enable)		(struct perf_counter *counter);
 	void (*disable)		(struct perf_counter *counter);
 	void (*read)		(struct perf_counter *counter);
 };
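
This header change is the API pivot of the commit: ->enable now returns int, so any implementation, hardware or software, can report that the counter could not be scheduled in, and generic code can propagate the failure. A rough sketch of an implementation and a checking caller under the new signature; the struct mirrors the header above, while dummy_*() and try_enable() are hypothetical:

#include <errno.h>

struct perf_counter;			/* opaque here, as in the header */

/* Same shape as struct hw_perf_counter_ops after this commit: */
struct hw_perf_counter_ops {
	int  (*enable)  (struct perf_counter *counter);
	void (*disable) (struct perf_counter *counter);
	void (*read)    (struct perf_counter *counter);
};

/* A trivial software counter: enabling it can never fail. */
static int  dummy_enable(struct perf_counter *counter)  { return 0; }
static void dummy_disable(struct perf_counter *counter) { }
static void dummy_read(struct perf_counter *counter)    { }

static const struct hw_perf_counter_ops dummy_ops = {
	.enable		= dummy_enable,
	.disable	= dummy_disable,
	.read		= dummy_read,
};

/* Generic code now checks the result instead of assuming success: */
static int try_enable(const struct hw_perf_counter_ops *ops,
		      struct perf_counter *counter)
{
	return ops->enable(counter) ? -EAGAIN : 0;
}

This is the pattern the software counters in kernel/perf_counter.c follow below: their enable callbacks simply gain a return 0, since pure software events always have a free "slot".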
62 changes: 45 additions & 17 deletions trunk/kernel/perf_counter.c
@@ -355,21 +355,25 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 	cpuctx->task_ctx = NULL;
 }
 
-static void
+static int
 counter_sched_in(struct perf_counter *counter,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
 	if (counter->state == PERF_COUNTER_STATE_OFF)
-		return;
+		return 0;
+
+	if (counter->hw_ops->enable(counter))
+		return -EAGAIN;
 
-	counter->hw_ops->enable(counter);
 	counter->state = PERF_COUNTER_STATE_ACTIVE;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
 
 	cpuctx->active_oncpu++;
 	ctx->nr_active++;
+
+	return 0;
 }
 
 static int
@@ -378,20 +382,38 @@ group_sched_in(struct perf_counter *group_counter,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
-	struct perf_counter *counter;
-	int was_group = 0;
+	struct perf_counter *counter, *partial_group;
+	int ret = 0;
 
-	counter_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+		return -EAGAIN;
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		counter_sched_in(counter, cpuctx, ctx, cpu);
-		was_group = 1;
+		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+			partial_group = counter;
+			goto group_error;
+		}
+		ret = -EAGAIN;
 	}
 
-	return was_group;
+	return ret;
+
+group_error:
+	/*
+	 * Groups can be scheduled in as one unit only, so undo any
+	 * partial group before returning:
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		if (counter == partial_group)
+			break;
+		counter_sched_out(counter, cpuctx, ctx);
+	}
+	counter_sched_out(group_counter, cpuctx, ctx);
+
+	return -EAGAIN;
 }
 
 /*
@@ -416,9 +438,6 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 
 	spin_lock(&ctx->lock);
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (ctx->nr_active == cpuctx->max_pertask)
-			break;
-
 		/*
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of counters:
@@ -856,8 +875,9 @@ static const struct file_operations perf_fops = {
 	.poll			= perf_poll,
 };
 
-static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
+static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 {
+	return 0;
 }
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
@@ -913,11 +933,13 @@ static void task_clock_perf_counter_read(struct perf_counter *counter)
 	task_clock_perf_counter_update(counter, now);
 }
 
-static void task_clock_perf_counter_enable(struct perf_counter *counter)
+static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
 	u64 now = task_clock_perf_counter_val(counter, 0);
 
 	atomic64_set(&counter->hw.prev_count, now);
+
+	return 0;
 }
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
@@ -960,12 +982,14 @@ static void page_faults_perf_counter_read(struct perf_counter *counter)
 	page_faults_perf_counter_update(counter);
 }
 
-static void page_faults_perf_counter_enable(struct perf_counter *counter)
+static int page_faults_perf_counter_enable(struct perf_counter *counter)
 {
 	/*
 	 * page-faults is a per-task value already,
 	 * so we dont have to clear it on switch-in.
 	 */
+
+	return 0;
 }
 
 static void page_faults_perf_counter_disable(struct perf_counter *counter)
@@ -1006,12 +1030,14 @@ static void context_switches_perf_counter_read(struct perf_counter *counter)
 	context_switches_perf_counter_update(counter);
 }
 
-static void context_switches_perf_counter_enable(struct perf_counter *counter)
+static int context_switches_perf_counter_enable(struct perf_counter *counter)
 {
 	/*
 	 * ->nvcsw + curr->nivcsw is a per-task value already,
 	 * so we dont have to clear it on switch-in.
 	 */
+
+	return 0;
 }
 
 static void context_switches_perf_counter_disable(struct perf_counter *counter)
@@ -1050,12 +1076,14 @@ static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
 	cpu_migrations_perf_counter_update(counter);
 }
 
-static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
+static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
 {
 	/*
 	 * se.nr_migrations is a per-task value already,
 	 * so we dont have to clear it on switch-in.
 	 */
+
+	return 0;
 }
 
 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
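
The group_sched_in() rewrite is where the new return value pays off: a counter group is all-or-nothing, so when one sibling's enable fails, every sibling already enabled, and then the group leader, is scheduled back out before -EAGAIN is returned. A self-contained sketch of that rollback pattern, with an array standing in for the kernel's sibling_list and a hypothetical two-slot PMU standing in for real hardware (all names are illustrative):

#include <errno.h>
#include <stdio.h>

#define NR_SLOTS 2			/* hypothetical hardware capacity */

struct counter { int enabled; };

static int slots_in_use;

static int sched_in(struct counter *c)
{
	if (slots_in_use == NR_SLOTS)
		return -EAGAIN;		/* no free hardware slot */
	slots_in_use++;
	c->enabled = 1;
	return 0;
}

static void sched_out(struct counter *c)
{
	slots_in_use--;
	c->enabled = 0;
}

/* Enable a leader plus its siblings as one unit; undo partial work on failure. */
static int group_sched_in(struct counter *leader,
			  struct counter *sibling, size_t nr_siblings)
{
	size_t i;

	if (sched_in(leader))
		return -EAGAIN;

	for (i = 0; i < nr_siblings; i++)
		if (sched_in(&sibling[i]))
			goto group_error;
	return 0;

group_error:
	/* Groups are scheduled as one unit only: undo the partial group. */
	while (i--)
		sched_out(&sibling[i]);
	sched_out(leader);
	return -EAGAIN;
}

int main(void)
{
	struct counter leader = { 0 }, siblings[2] = { { 0 }, { 0 } };

	/* Three counters, two slots: the whole group must fail cleanly. */
	int err = group_sched_in(&leader, siblings, 2);
	printf("err=%d slots_in_use=%d\n", err, slots_in_use);	/* err=-11 slots_in_use=0 */
	return 0;
}

Note that the rollback walks only the siblings enabled before the failing one (the while (i--) loop stops short of the failed index), exactly the role partial_group plays in the kernel code above.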
