perf_counter: initialize the per-cpu context earlier
percpu scheduling for perfcounters wants to take the context lock,
but that lock first needs to be initialized. Currently it is an
early_initcall() - but that is too late: the task tick runs much
sooner than that.

Call it explicitly from the scheduler init sequence instead.

[ Impact: fix access-before-init crash ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar committed May 4, 2009
1 parent ba77813 commit 0d905bc
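The ordering problem the commit message describes can be pictured with a minimal, self-contained C sketch. The stub functions below are hypothetical stand-ins (toy bodies, userspace printf), not the kernel's actual boot code; they only model the sequence the message relies on: sched_init() runs very early, scheduler ticks can start firing soon after, and early_initcall()s are only processed later.

#include <stdio.h>

/* Hypothetical stand-ins for the real kernel functions involved;
 * only the call ordering is meant to match the commit message. */

static int perf_ctx_ready;                  /* models the per-cpu context lock state */

static void perf_counter_init(void)
{
        perf_ctx_ready = 1;                 /* the real code sets up the per-cpu contexts */
        printf("perf_counter_init: per-cpu contexts ready\n");
}

static void sched_init(void)
{
        printf("sched_init\n");
        perf_counter_init();                /* the fix: initialize before the first tick */
}

static void scheduler_tick(void)
{
        /* Before the fix, this path could run against an uninitialized context. */
        printf("tick: perf context %s\n",
               perf_ctx_ready ? "ready" : "NOT ready (access-before-init)");
}

static void do_early_initcalls(void)
{
        /* early_initcall()s run here, after ticks have already been firing. */
        printf("early initcalls\n");
}

int main(void)                              /* roughly mirrors the boot ordering */
{
        sched_init();
        scheduler_tick();                   /* the task tick starts well before ... */
        do_early_initcalls();               /* ... the initcall phase */
        return 0;
}

Calling perf_counter_init() explicitly from sched_init(), as the diff below does, makes the ordering guarantee explicit instead of relying on the initcall phase running in time.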
Showing 3 changed files with 9 additions and 6 deletions.
include/linux/perf_counter.h: 5 changes (4 additions, 1 deletion)

@@ -573,6 +573,8 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
 extern int sysctl_perf_counter_priv;
 
+extern void perf_counter_init(void);
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
@@ -600,9 +602,10 @@ perf_counter_mmap(unsigned long addr, unsigned long len,
 
 static inline void
 perf_counter_munmap(unsigned long addr, unsigned long len,
-                    unsigned long pgoff, struct file *file) { }
+                        unsigned long pgoff, struct file *file) { }
 
 static inline void perf_counter_comm(struct task_struct *tsk) { }
+static inline void perf_counter_init(void) { }
 #endif
 
 #endif /* __KERNEL__ */
kernel/perf_counter.c: 5 changes (1 addition, 4 deletions)

@@ -3265,15 +3265,12 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
         .notifier_call = perf_cpu_notify,
 };
 
-static int __init perf_counter_init(void)
+void __init perf_counter_init(void)
 {
         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
                         (void *)(long)smp_processor_id());
         register_cpu_notifier(&perf_cpu_nb);
-
-        return 0;
 }
-early_initcall(perf_counter_init);
 
 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
 {
kernel/sched.c: 5 changes (4 additions, 1 deletion)

@@ -39,6 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
+#include <linux/perf_counter.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -8996,7 +8997,7 @@ void __init sched_init(void)
         * 1024) and two child groups A0 and A1 (of weight 1024 each),
         * then A0's share of the cpu resource is:
         *
-        *        A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+        *  A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
         *
         * We achieve this by letting init_task_group's tasks sit
         * directly in rq->cfs (i.e init_task_group->se[] = NULL).
@@ -9097,6 +9098,8 @@ void __init sched_init(void)
         alloc_bootmem_cpumask_var(&cpu_isolated_map);
 #endif /* SMP */
 
+        perf_counter_init();
+
         scheduler_running = 1;
 }
 
