Commit 931fa5f
---
r: 182386
b: refs/heads/master
c: 5b0311e
h: refs/heads/master
v: v3
Frederic Weisbecker committed Jan 17, 2010
1 parent 7b6e197 commit 931fa5f
Showing 2 changed files with 94 additions and 45 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 42cce92f4ddfa41e2dfe26fdcad4887943c032f2
+refs/heads/master: 5b0311e1f2464547fc6f17a82d7ea2538c8c7a70
trunk/kernel/perf_event.c: 137 changes (93 additions, 44 deletions)
@@ -1049,8 +1049,15 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
 	return 0;
 }
 
-static void __perf_event_sched_out(struct perf_event_context *ctx,
-				   struct perf_cpu_context *cpuctx)
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
+static void ctx_sched_out(struct perf_event_context *ctx,
+			  struct perf_cpu_context *cpuctx,
+			  enum event_type_t event_type)
 {
 	struct perf_event *event;
 
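Note: the new enum event_type_t is a bit-mask, not a plain enumeration: EVENT_ALL is the bitwise OR of the other two values, so a single argument can select pinned groups, flexible groups, or both. A minimal user-space sketch (illustrative only, reusing the names from the patch) of how such a mask is composed and tested:

#include <stdio.h>

/* Mirror of the enum above; EVENT_ALL carries both bits. */
enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

int main(void)
{
	enum event_type_t event_type = EVENT_ALL;

	/* Callers test individual bits with '&', as ctx_sched_out() does. */
	if (event_type & EVENT_PINNED)
		printf("would touch pinned groups\n");
	if (event_type & EVENT_FLEXIBLE)
		printf("would touch flexible groups\n");
	return 0;
}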
@@ -1061,13 +1068,18 @@ static void __perf_event_sched_out(struct perf_event_context *ctx,
 	update_context_time(ctx);
 
 	perf_disable();
-	if (ctx->nr_active) {
-		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
-			group_sched_out(event, cpuctx, ctx);
+	if (!ctx->nr_active)
+		goto out_enable;
+
+	if (event_type & EVENT_PINNED)
+		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
+			group_sched_out(event, cpuctx, ctx);
 
-		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
-			group_sched_out(event, cpuctx, ctx);
-	}
+	if (event_type & EVENT_FLEXIBLE)
+		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
+			group_sched_out(event, cpuctx, ctx);
+
+ out_enable:
 	perf_enable();
  out:
 	raw_spin_unlock(&ctx->lock);
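Note: the early exit when ctx->nr_active is zero is routed through the new out_enable label so the perf_disable()/perf_enable() pair always stays balanced. A stand-alone sketch of the pattern (pmu_disable()/pmu_enable() are hypothetical stand-ins for the kernel calls):

#include <stdio.h>

static void pmu_disable(void) { printf("PMU disabled\n"); }
static void pmu_enable(void)  { printf("PMU enabled\n"); }

/* Even with nothing to tear down, the goto lands after the loops but
 * before pmu_enable(), so every disable is matched by an enable. */
static void sched_out(int nr_active)
{
	pmu_disable();
	if (!nr_active)
		goto out_enable;

	printf("tearing down %d active groups\n", nr_active);

out_enable:
	pmu_enable();
}

int main(void)
{
	sched_out(0);	/* early exit path */
	sched_out(3);	/* full path */
	return 0;
}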
@@ -1229,15 +1241,13 @@ void perf_event_task_sched_out(struct task_struct *task,
 	rcu_read_unlock();
 
 	if (do_switch) {
-		__perf_event_sched_out(ctx, cpuctx);
+		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 		cpuctx->task_ctx = NULL;
 	}
 }
 
-/*
- * Called with IRQs disabled
- */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
+static void task_ctx_sched_out(struct perf_event_context *ctx,
+			       enum event_type_t event_type)
 {
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
@@ -1247,39 +1257,34 @@ static void __perf_event_task_sched_out(struct perf_event_context *ctx)
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	__perf_event_sched_out(ctx, cpuctx);
+	ctx_sched_out(ctx, cpuctx, event_type);
 	cpuctx->task_ctx = NULL;
 }
 
 /*
  * Called with IRQs disabled
  */
-static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_event_task_sched_out(struct perf_event_context *ctx)
 {
-	__perf_event_sched_out(&cpuctx->ctx, cpuctx);
+	task_ctx_sched_out(ctx, EVENT_ALL);
+}
+
+/*
+ * Called with IRQs disabled
+ */
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type)
+{
+	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
 static void
-__perf_event_sched_in(struct perf_event_context *ctx,
-			struct perf_cpu_context *cpuctx)
+ctx_pinned_sched_in(struct perf_event_context *ctx,
+		    struct perf_cpu_context *cpuctx,
+		    int cpu)
 {
-	int cpu = smp_processor_id();
 	struct perf_event *event;
-	int can_add_hw = 1;
 
-	raw_spin_lock(&ctx->lock);
-	ctx->is_active = 1;
-	if (likely(!ctx->nr_events))
-		goto out;
-
-	ctx->timestamp = perf_clock();
-
-	perf_disable();
-
-	/*
-	 * First go through the list and put on any pinned groups
-	 * in order to give them the best chance of going on.
-	 */
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
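Note: ctx_pinned_sched_in() now receives cpu from its caller instead of computing smp_processor_id() itself, and like the teardown path it walks an intrusive list with list_for_each_entry(). A compressed user-space model of that iterator (simplified from the kernel's <linux/list.h>; __typeof__ stands in for the kernel's typeof), showing how each embedded node is mapped back to its containing event:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Recover the address of the containing structure from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified list_for_each_entry(): walk a circular list of embedded nodes. */
#define list_for_each_entry(pos, head, member)					\
	for (pos = container_of((head)->next, __typeof__(*pos), member);	\
	     &pos->member != (head);						\
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

struct event { int id; struct list_head group_entry; };

int main(void)
{
	struct event a = { .id = 1 }, b = { .id = 2 };
	struct list_head pinned_groups = { &a.group_entry, &b.group_entry };
	struct event *event;

	/* Wire up head <-> a <-> b <-> head, as the kernel macros would. */
	a.group_entry.next = &b.group_entry;
	a.group_entry.prev = &pinned_groups;
	b.group_entry.next = &pinned_groups;
	b.group_entry.prev = &a.group_entry;

	list_for_each_entry(event, &pinned_groups, group_entry)
		printf("visiting event %d\n", event->id);
	return 0;
}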
@@ -1298,6 +1303,15 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 			event->state = PERF_EVENT_STATE_ERROR;
 		}
 	}
+}
+
+static void
+ctx_flexible_sched_in(struct perf_event_context *ctx,
+		      struct perf_cpu_context *cpuctx,
+		      int cpu)
+{
+	struct perf_event *event;
+	int can_add_hw = 1;
 
 	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
 		/* Ignore events in OFF or ERROR state */
@@ -1314,11 +1328,53 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 			if (group_sched_in(event, cpuctx, ctx, cpu))
 				can_add_hw = 0;
 	}
+}
+
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+	     struct perf_cpu_context *cpuctx,
+	     enum event_type_t event_type)
+{
+	int cpu = smp_processor_id();
+
+	raw_spin_lock(&ctx->lock);
+	ctx->is_active = 1;
+	if (likely(!ctx->nr_events))
+		goto out;
+
+	ctx->timestamp = perf_clock();
+
+	perf_disable();
+
+	/*
+	 * First go through the list and put on any pinned groups
+	 * in order to give them the best chance of going on.
+	 */
+	if (event_type & EVENT_PINNED)
+		ctx_pinned_sched_in(ctx, cpuctx, cpu);
+
+	/* Then walk through the lower prio flexible groups */
+	if (event_type & EVENT_FLEXIBLE)
+		ctx_flexible_sched_in(ctx, cpuctx, cpu);
+
 	perf_enable();
  out:
 	raw_spin_unlock(&ctx->lock);
 }
 
+static void task_ctx_sched_in(struct task_struct *task,
+			      enum event_type_t event_type)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_event_context *ctx = task->perf_event_ctxp;
+
+	if (likely(!ctx))
+		return;
+	if (cpuctx->task_ctx == ctx)
+		return;
+	ctx_sched_in(ctx, cpuctx, event_type);
+	cpuctx->task_ctx = ctx;
+}
 /*
  * Called from scheduler to add the events of the current task
  * with interrupts disabled.
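Note: ctx_sched_in() preserves the ordering the monolithic __perf_event_sched_in() enforced: pinned groups get first claim on the hardware, then flexible groups compete for whatever is left, with can_add_hw cutting off further attempts after the first failure. A toy model of that two-pass admission policy (hypothetical counter costs; the real decision lives in group_can_go_on() and group_sched_in()):

#include <stdio.h>

#define NCOUNTERS 2			/* hypothetical hardware counter budget */

static int counters_used;

/* Toy group_sched_in(): claim counters if the group still fits. */
static int group_sched_in(int cost)
{
	if (counters_used + cost > NCOUNTERS)
		return -1;
	counters_used += cost;
	return 0;
}

int main(void)
{
	int pinned[2]   = { 1, 1 };
	int flexible[2] = { 1, 1 };
	int can_add_hw = 1;
	int i;

	/* Pass 1: pinned groups go first; one that still misses the
	 * hardware would be moved to PERF_EVENT_STATE_ERROR. */
	for (i = 0; i < 2; i++)
		if (group_sched_in(pinned[i]))
			printf("pinned %d -> ERROR\n", i);

	/* Pass 2: flexible groups fill the remainder; after the first
	 * failure, can_add_hw = 0 stops further attempts this round. */
	for (i = 0; i < 2; i++) {
		if (can_add_hw && group_sched_in(flexible[i]) == 0) {
			printf("flexible %d scheduled\n", i);
		} else {
			can_add_hw = 0;
			printf("flexible %d waits for rotation\n", i);
		}
	}
	return 0;
}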
@@ -1332,22 +1388,15 @@ __perf_event_sched_in(struct perf_event_context *ctx,
  */
 void perf_event_task_sched_in(struct task_struct *task)
 {
-	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-	struct perf_event_context *ctx = task->perf_event_ctxp;
-
-	if (likely(!ctx))
-		return;
-	if (cpuctx->task_ctx == ctx)
-		return;
-	__perf_event_sched_in(ctx, cpuctx);
-	cpuctx->task_ctx = ctx;
+	task_ctx_sched_in(task, EVENT_ALL);
 }
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	__perf_event_sched_in(ctx, cpuctx);
+	ctx_sched_in(ctx, cpuctx, event_type);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1476,17 +1525,17 @@ void perf_event_task_tick(struct task_struct *curr)
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
-	perf_event_cpu_sched_out(cpuctx);
+	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 	if (ctx)
-		__perf_event_task_sched_out(ctx);
+		task_ctx_sched_out(ctx, EVENT_ALL);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
 		rotate_ctx(ctx);
 
-	perf_event_cpu_sched_in(cpuctx);
+	cpu_ctx_sched_in(cpuctx, EVENT_ALL);
 	if (ctx)
-		perf_event_task_sched_in(curr);
+		task_ctx_sched_in(curr, EVENT_ALL);
 }
 
 static int event_enable_on_exec(struct perf_event *event,
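Note: with the wrappers in place, perf_event_task_tick() still does a full round trip at every tick: schedule everything out with EVENT_ALL, rotate the contexts, schedule everything back in. The rotation is what gives a flexible group that lost out one round a turn in a later one. A toy model of that effect (an array rotation standing in for the kernel's rotate_ctx()):

#include <stdio.h>

#define NGROUPS 3

int main(void)
{
	int flexible[NGROUPS] = { 0, 1, 2 };	/* group ids; the front gets hardware first */
	int tick, i, first;

	for (tick = 0; tick < 3; tick++) {
		/* sched out all ... rotate_ctx() ... sched in all */
		first = flexible[0];
		for (i = 0; i < NGROUPS - 1; i++)
			flexible[i] = flexible[i + 1];
		flexible[NGROUPS - 1] = first;

		printf("after tick %d, schedule order:", tick);
		for (i = 0; i < NGROUPS; i++)
			printf(" %d", flexible[i]);
		printf("\n");
	}
	return 0;
}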
