perf events: Remove arg from perf sched hooks
Since we only ever schedule the local cpu, there is no need to pass the
cpu number to the perf sched hooks.

This micro-optimizes things a bit.
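For context, the change swaps an indexed per-cpu lookup for a local-cpu accessor: instead of threading a cpu number down to per_cpu(), the hooks use __get_cpu_var(), and anything that still needs the id fetches it locally via smp_processor_id(). A minimal sketch of the two access patterns (example_counter, old_way and new_way are hypothetical names for illustration, not the kernel's actual symbols):

	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* Hypothetical per-cpu variable, for illustration only. */
	static DEFINE_PER_CPU(int, example_counter);

	/* Before: callers threaded a cpu number through the call chain,
	 * and per_cpu() computed that cpu's per-cpu offset from the id. */
	static int *old_way(int cpu)
	{
		return &per_cpu(example_counter, cpu);
	}

	/* After: __get_cpu_var() resolves the running cpu's copy directly,
	 * so the argument drops out of every signature; code that still
	 * needs the id can call smp_processor_id() itself. Both require
	 * preemption to be disabled, which holds in the sched hooks. */
	static int *new_way(void)
	{
		return &__get_cpu_var(example_counter);
	}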

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Dec 28, 2009
1 parent 4cf4013 commit 49f4743
Showing 3 changed files with 23 additions and 22 deletions.
12 changes: 6 additions & 6 deletions include/linux/perf_event.h
@@ -746,10 +746,10 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task,
-				      struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+				      struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -870,12 +870,12 @@ extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			  struct task_struct *next, int cpu)		{ }
+			  struct task_struct *next)			{ }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
27 changes: 14 additions & 13 deletions kernel/perf_event.c
@@ -1170,9 +1170,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
  * not restart the event.
  */
 void perf_event_task_sched_out(struct task_struct *task,
-			       struct task_struct *next, int cpu)
+			       struct task_struct *next)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
@@ -1252,8 +1252,9 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
 
 static void
 __perf_event_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx, int cpu)
+		      struct perf_cpu_context *cpuctx)
 {
+	int cpu = smp_processor_id();
 	struct perf_event *event;
 	int can_add_hw = 1;
 
@@ -1326,24 +1327,24 @@ __perf_event_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task, int cpu)
+void perf_event_task_sched_in(struct task_struct *task)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 
 	if (likely(!ctx))
 		return;
 	if (cpuctx->task_ctx == ctx)
 		return;
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 	cpuctx->task_ctx = ctx;
 }
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1461,15 +1462,15 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	raw_spin_unlock(&ctx->lock);
 }
 
-void perf_event_task_tick(struct task_struct *curr, int cpu)
+void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
 
 	if (!atomic_read(&nr_events))
 		return;
 
-	cpuctx = &per_cpu(perf_cpu_context, cpu);
+	cpuctx = &__get_cpu_var(perf_cpu_context);
 	ctx = curr->perf_event_ctxp;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
@@ -1484,9 +1485,9 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
 	if (ctx)
 		rotate_ctx(ctx);
 
-	perf_event_cpu_sched_in(cpuctx, cpu);
+	perf_event_cpu_sched_in(cpuctx);
 	if (ctx)
-		perf_event_task_sched_in(curr, cpu);
+		perf_event_task_sched_in(curr);
 }
 
 /*
@@ -1527,7 +1528,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	raw_spin_unlock(&ctx->lock);
 
-	perf_event_task_sched_in(task, smp_processor_id());
+	perf_event_task_sched_in(task);
  out:
 	local_irq_restore(flags);
 }
6 changes: 3 additions & 3 deletions kernel/sched.c
@@ -2752,7 +2752,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_event_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current);
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5266,7 +5266,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
-	perf_event_task_tick(curr, cpu);
+	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5480,7 +5480,7 @@ asmlinkage void __sched schedule(void)
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next);
 
 		rq->nr_switches++;
 		rq->curr = next;
