Commit 307933f

---
r: 147188
b: refs/heads/master
c: 3cbed42
h: refs/heads/master
v: v3

Paul Mackerras committed Jan 9, 2009
1 parent 6af5001 commit 307933f

Showing 3 changed files with 30 additions and 6 deletions.

2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9abf8a08bc8f18a3b125f834f00e2e71b49c15d2
+refs/heads/master: 3cbed429a9ccdb7a243f733b1056fe5c39e9004c

3 changes: 3 additions & 0 deletions trunk/include/linux/perf_counter.h
@@ -236,6 +236,9 @@ extern u64 hw_perf_save_disable(void);
 extern void hw_perf_restore(u64 ctrl);
 extern int perf_counter_task_disable(void);
 extern int perf_counter_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx, int cpu);
 
 #else
 static inline void

31 changes: 26 additions & 5 deletions trunk/kernel/perf_counter.c
@@ -47,6 +47,12 @@ hw_perf_counter_init(struct perf_counter *counter)
 u64 __weak hw_perf_save_disable(void)		{ return 0; }
 void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
 void __weak hw_perf_counter_setup(void)	{ barrier(); }
+int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx, int cpu)
+{
+	return 0;
+}
 
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
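
The six added lines above are a __weak default, which is what makes the new
hook optional: an architecture that can put a whole counter group onto its
PMU atomically supplies a strong definition, and the linker prefers it over
the weak stub. A minimal sketch of such an override (the arch_pmu_* helpers
are hypothetical, not part of this commit):

/* hypothetical arch helpers, assumed to exist elsewhere in the backend: */
extern int arch_pmu_can_take_group(struct perf_counter *leader);
extern void arch_pmu_commit_group(struct perf_counter *leader, int cpu);

int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	/*
	 * Hypothetical backend: verify that the leader and all its
	 * siblings fit on the PMU together, then commit them as one
	 * atomic unit.
	 */
	if (!arch_pmu_can_take_group(group_leader))	/* hypothetical */
		return -EAGAIN;		/* group cannot go on at all */
	arch_pmu_commit_group(group_leader, cpu);	/* hypothetical */
	return 1;	/* positive: handled here, skip the generic path */
}
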
@@ -341,6 +347,9 @@ group_sched_out(struct perf_counter *group_counter,
 {
 	struct perf_counter *counter;
 
+	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
+		return;
+
 	counter_sched_out(group_counter, cpuctx, ctx);
 
 	/*
@@ -354,15 +363,18 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	if (ctx->nr_active) {
 		list_for_each_entry(counter, &ctx->counter_list, list_entry)
 			group_sched_out(counter, cpuctx, ctx);
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }

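Both reworked scheduling paths now share one bracketing idiom: quiesce the
PMU, walk the counter list, re-enable counting, all under ctx->lock.
Condensed to its skeleton (illustrative only, built from the calls this
commit adds):

	u64 flags;

	spin_lock(&ctx->lock);
	flags = hw_perf_save_disable();	/* stop counting, keep old state */
	/* ... tear down or install counters while the PMU is quiet ... */
	hw_perf_restore(flags);		/* resume counting as before */
	spin_unlock(&ctx->lock);
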
@@ -402,7 +414,14 @@ group_sched_in(struct perf_counter *group_counter,
 	       int cpu)
 {
 	struct perf_counter *counter, *partial_group;
-	int ret = 0;
+	int ret;
+
+	if (group_counter->state == PERF_COUNTER_STATE_OFF)
+		return 0;
+
+	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (ret)
+		return ret < 0 ? ret : 0;
 
 	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
 		return -EAGAIN;
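
The terse "ret < 0 ? ret : 0" above encodes the hook's three-way contract;
written out long-hand (an illustrative expansion, not code from this
commit):

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret > 0)		/* arch scheduled the whole group itself */
		return 0;	/* success; nothing more to do here */
	if (ret < 0)		/* arch says this group cannot go on */
		return ret;	/* propagate the error, e.g. -EAGAIN */
	/* ret == 0: no arch support; fall through to the generic path */
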
@@ -415,10 +434,9 @@ group_sched_in(struct perf_counter *group_counter,
 			partial_group = counter;
 			goto group_error;
 		}
-		ret = -EAGAIN;
 	}
 
-	return ret;
+	return 0;
 
 group_error:
 	/*
@@ -440,11 +458,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			 struct perf_cpu_context *cpuctx, int cpu)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		/*
 		 * Listen to the 'cpu' scheduling filter constraint
@@ -454,12 +474,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			continue;
 
 		/*
-		 * If we scheduled in a group atomically and
-		 * exclusively, break out:
+		 * If we scheduled in a group atomically and exclusively,
+		 * or if this group can't go on, break out:
 		 */
 		if (group_sched_in(counter, cpuctx, ctx, cpu))
 			break;
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }
 
