diff --git a/[refs] b/[refs] index 10ea95cce605..a039e3d0fd20 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 082cf69eb82681f4eacb3a5653834c7970714bef +refs/heads/master: f340c0d1a3f40fdcba69cd291530a4debc58748f diff --git a/trunk/init/main.c b/trunk/init/main.c index d324801729ba..b5e421e39ede 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -383,6 +383,13 @@ static void noinline rest_init(void) numa_default_policy(); unlock_kernel(); preempt_enable_no_resched(); + + /* + * The boot idle thread must execute schedule() + * at least once to get things moving: + */ + schedule(); + cpu_idle(); } diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index e2b0d3e4dd06..5f2182d42241 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -4166,6 +4166,14 @@ void show_state(void) read_unlock(&tasklist_lock); } +/** + * init_idle - set up an idle thread for a given CPU + * @idle: task in question + * @cpu: cpu the idle task belongs to + * + * NOTE: this function does not set the idle thread's NEED_RESCHED + * flag, to make booting more robust. + */ void __devinit init_idle(task_t *idle, int cpu) { runqueue_t *rq = cpu_rq(cpu); @@ -4183,7 +4191,6 @@ void __devinit init_idle(task_t *idle, int cpu) #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) idle->oncpu = 1; #endif - set_tsk_need_resched(idle); spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */