diff --git a/[refs] b/[refs]
index 672e08bbf49f..476ae95c5697 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 414bee9ba613adb3804965e2d84db32d0599f9c6
+refs/heads/master: a8941d7ec81678fb69aea7183338175f112f3e0d
diff --git a/trunk/kernel/sched_idletask.c b/trunk/kernel/sched_idletask.c
index 9fa0f402c87c..41eb62a0808b 100644
--- a/trunk/kernel/sched_idletask.c
+++ b/trunk/kernel/sched_idletask.c
@@ -52,31 +52,16 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
-static void switched_to_idle(struct rq *rq, struct task_struct *p,
-			     int running)
+static void
+switched_to_idle(struct rq *rq, struct task_struct *p, int running)
 {
-	/* Can this actually happen?? */
-	if (running)
-		resched_task(rq->curr);
-	else
-		check_preempt_curr(rq, p, 0);
+	BUG();
 }
 
 static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 			      int oldprio, int running)
 {
-	/* This can happen for hot plug CPUS */
-
-	/*
-	 * Reschedule if we are currently running on this runqueue and
-	 * our priority decreased, or if we are not currently running on
-	 * this runqueue and our priority is higher than the current's
-	 */
-	if (running) {
-		if (p->prio > oldprio)
-			resched_task(rq->curr);
-	} else
-		check_preempt_curr(rq, p, 0);
+	BUG();
 }
 
 static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
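
For context on the change above: the two deleted callback bodies implemented the
generic "did the preemption situation change?" pattern that the other scheduling
classes share, and the commit replaces them with BUG(), evidently on the grounds
that these paths are unreachable for the idle class (the deleted "Can this
actually happen??" comment points the same way). Below is a minimal userspace
sketch, not kernel code, of the decision logic the deleted prio_changed_idle()
body expressed. The names struct task, resched() and check_preempt() are
illustrative stand-ins for the kernel's task_struct, resched_task() and
check_preempt_curr(); as in the kernel, a numerically lower ->prio means a
higher priority.

/* Sketch only: stand-ins for the kernel helpers, not the real API. */
#include <stdbool.h>
#include <stdio.h>

struct task { int prio; bool running; };

/* Stand-in for resched_task(): flag the CPU for rescheduling. */
static void resched(const char *why)
{
	printf("resched: %s\n", why);
}

/* Stand-in for check_preempt_curr(): should the changed task p
 * preempt the currently running task? */
static void check_preempt(struct task *curr, struct task *p)
{
	if (p->prio < curr->prio)
		resched("changed task now outranks current");
}

/* The pattern the deleted prio_changed_idle() body followed:
 * if the changed task is the one running and its priority got
 * worse (numerically larger), reschedule; otherwise check whether
 * it should now preempt whoever is running. */
static void prio_changed(struct task *curr, struct task *p, int oldprio)
{
	if (p->running) {
		if (p->prio > oldprio)
			resched("running task's priority dropped");
	} else {
		check_preempt(curr, p);
	}
}

int main(void)
{
	struct task curr = { .prio = 120, .running = true };
	struct task p    = { .prio = 130, .running = true };

	/* p was prio 110, is now 130: priority dropped while running,
	 * so the sketch reschedules. */
	prio_changed(&curr, &p, 110);
	return 0;
}

The idle task itself never changes scheduling class or priority, which is why
the commit can collapse both callbacks to BUG() rather than keep this logic.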