sched: Fix TASK_WAKING vs fork deadlock
Oleg noticed a few races with the TASK_WAKING usage on fork.

 - since TASK_WAKING is basically a spinlock, it should be IRQ safe
 - since we set TASK_WAKING (*) without holding rq->lock, there could
   still be a rq->lock holder, so we do not actually provide full
   serialization.

(*) in fact we clear PF_STARTING, which in effect enables TASK_WAKING.

Cure the second issue by not setting TASK_WAKING in sched_fork(), but
only temporarily in wake_up_new_task() while calling select_task_rq().

Cure the first by holding rq->lock around the select_task_rq() call;
this disables IRQs. It does, however, require pushing the rq->lock
release down into select_task_rq_fair()'s cgroup code.

Because select_task_rq_fair() still needs to drop the rq->lock, we
cannot fully get rid of TASK_WAKING.

Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Apr 2, 2010
1 parent 9084bb8 commit 0017d73
Showing 5 changed files with 36 additions and 48 deletions.
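Editor's sketch: to make the new locking protocol concrete before reading the
diff, here is a minimal user-space model of the pattern wake_up_new_task()
adopts: take the runqueue lock, mark the task TASK_WAKING so the CPU selector
may temporarily drop that lock, then settle the task back to TASK_RUNNING
before unlocking. Everything below (struct task, rq_lock, select_cpu,
wake_new_task) is a hypothetical pthread-based stand-in, not the kernel's API;
in the kernel the lock is rq->lock (taking it disables IRQs, curing the first
race) and the lock-dropping happens around update_shares() in
select_task_rq_fair(), which is why TASK_WAKING cannot be removed entirely.

    #include <pthread.h>
    #include <stdio.h>

    enum task_state { TASK_RUNNING, TASK_WAKING };

    struct task {
            enum task_state state;
            int cpu;
    };

    /* Stand-in for rq->lock (a mutex here; a raw spinlock in the kernel). */
    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Stand-in for select_task_rq(): it may drop and retake rq_lock, the
     * way select_task_rq_fair() now does around update_shares(). Dropping
     * the lock is tolerable only because the caller has set TASK_WAKING,
     * which tells onlookers not to touch the task in the meantime.
     */
    static int select_cpu(struct task *p)
    {
            (void)p;
            pthread_mutex_unlock(&rq_lock);
            /* ... unlocked balancing work happens here ... */
            pthread_mutex_lock(&rq_lock);
            return 0;       /* pretend CPU 0 was chosen */
    }

    /* The shape of the new wake_up_new_task() SMP path. */
    static void wake_new_task(struct task *p)
    {
            pthread_mutex_lock(&rq_lock);
            p->state = TASK_WAKING; /* selector may now drop rq_lock safely */
            p->cpu = select_cpu(p);
            p->state = TASK_RUNNING;
            pthread_mutex_unlock(&rq_lock);
    }

    int main(void)
    {
            struct task t = { TASK_RUNNING, -1 };

            wake_new_task(&t);
            printf("placed on cpu %d\n", t.cpu);
            return 0;
    }

Contrast this with the old flow, where TASK_WAKING was in effect enabled from
the fork path without any lock held: here the marker only ever appears while
rq_lock is held, so a concurrent lock holder can never observe a half-placed
waking task.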
3 changes: 2 additions & 1 deletion include/linux/sched.h
@@ -1046,7 +1046,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			      int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
65 changes: 24 additions & 41 deletions kernel/sched.c
@@ -916,14 +916,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 /*
  * Check whether the task is waking, we use this to synchronize against
  * ttwu() so that task_cpu() reports a stable number.
- *
- * We need to make an exception for PF_STARTING tasks because the fork
- * path might require task_rq_lock() to work, eg. it can call
- * set_cpus_allowed_ptr() from the cpuset clone_ns code.
  */
 static inline int task_is_waking(struct task_struct *p)
 {
-	return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
+	return unlikely(p->state == TASK_WAKING);
 }
 
 /*
@@ -2320,9 +2316,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -2393,17 +2389,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	if (p->sched_class->task_waking)
 		p->sched_class->task_waking(rq, p);
 
-	__task_rq_unlock(rq);
-
-	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu) {
-		/*
-		 * Since we migrate the task without holding any rq->lock,
-		 * we need to be careful with task_rq_lock(), since that
-		 * might end up locking an invalid rq.
-		 */
+	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
-	}
+	__task_rq_unlock(rq);
 
 	rq = cpu_rq(cpu);
 	raw_spin_lock(&rq->lock);
@@ -2530,11 +2519,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 
 	__sched_fork(p);
 	/*
-	 * We mark the process as waking here. This guarantees that
+	 * We mark the process as running here. This guarantees that
 	 * nobody will actually run it, and a signal or other external
 	 * event cannot wake it up and insert it on the runqueue either.
 	 */
-	p->state = TASK_WAKING;
+	p->state = TASK_RUNNING;
 
 	/*
 	 * Revert to default priority/policy on fork if requested.
@@ -2601,28 +2590,25 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
+	rq = task_rq_lock(p, &flags);
+	p->state = TASK_WAKING;
+
 	/*
 	 * Fork balancing, do it here and not earlier because:
 	 * - cpus_allowed can change in the fork path
 	 * - any previously selected cpu might disappear through hotplug
 	 *
-	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
-	 * ->cpus_allowed is stable, we have preemption disabled, meaning
-	 * cpu_online_mask is stable.
+	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
+	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
 	set_task_cpu(p, cpu);
-#endif
-
-	/*
-	 * Since the task is not on the rq and we still have TASK_WAKING set
-	 * nobody else will migrate this task.
-	 */
-	rq = cpu_rq(cpu);
-	raw_spin_lock_irqsave(&rq->lock, flags);
-
-	BUG_ON(p->state != TASK_WAKING);
+
 	p->state = TASK_RUNNING;
+	task_rq_unlock(rq, &flags);
+#endif
 
+	rq = task_rq_lock(p, &flags);
 	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
@@ -3068,19 +3054,15 @@ void sched_exec(void)
 {
 	struct task_struct *p = current;
 	struct migration_req req;
-	int dest_cpu, this_cpu;
 	unsigned long flags;
 	struct rq *rq;
-
-	this_cpu = get_cpu();
-	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
-	if (dest_cpu == this_cpu) {
-		put_cpu();
-		return;
-	}
+	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	put_cpu();
+	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	if (dest_cpu == smp_processor_id())
+		goto unlock;
 
 	/*
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
@@ -3098,6 +3080,7 @@ void sched_exec(void)
 
 		return;
 	}
+unlock:
 	task_rq_unlock(rq, &flags);
 }
 
8 changes: 6 additions & 2 deletions kernel/sched_fair.c
@@ -1423,7 +1423,8 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+static int
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -1521,8 +1522,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 			  cpumask_weight(sched_domain_span(sd))))
 			tmp = affine_sd;
 
-		if (tmp)
+		if (tmp) {
+			raw_spin_unlock(&rq->lock);
 			update_shares(tmp);
+			raw_spin_lock(&rq->lock);
+		}
 	}
 #endif
 
3 changes: 2 additions & 1 deletion kernel/sched_idletask.c
@@ -6,7 +6,8 @@
  */
 
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
5 changes: 2 additions & 3 deletions kernel/sched_rt.c
@@ -948,10 +948,9 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 {
-	struct rq *rq = task_rq(p);
-
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
