sched: fix missing locking in sched_domains code
Concurrent calls to detach_destroy_domains and arch_init_sched_domains
were prevented by the old scheduler subsystem cpu hotplug mutex. When
this got converted to get_online_cpus(), the locking got broken.
Unlike before, several processes can now concurrently enter the critical
sections that were protected by the old lock.

So use the already present doms_cur_mutex (renamed to sched_domains_mutex
and moved out of the CONFIG_GROUP_SCHED-only code so it is always
available) to protect these sections again.

Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
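
For readers skimming the diff below, here is a minimal sketch of the locking
pattern the patch establishes, modelled on arch_reinit_sched_domains() in the
diff; the helper name rebuild_sched_domains_sketch() is hypothetical and used
only for illustration. get_online_cpus() keeps CPUs from being hotplugged
while sched_domains_mutex serializes the rebuilders that the old hotplug
mutex used to serialize.

/*
 * Illustrative sketch only, not part of the patch. The mutex below is the
 * one this commit adds to kernel/sched.c; the helper name is made up.
 */
static DEFINE_MUTEX(sched_domains_mutex);

static int rebuild_sched_domains_sketch(void)
{
        int err;

        get_online_cpus();                /* pin the set of online CPUs */
        mutex_lock(&sched_domains_mutex); /* one domain rebuilder at a time */
        detach_destroy_domains(&cpu_online_map);
        err = arch_init_sched_domains(&cpu_online_map);
        mutex_unlock(&sched_domains_mutex);
        put_online_cpus();

        return err;
}
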
Heiko Carstens authored and Ingo Molnar committed May 5, 2008
1 parent 690229a commit 712555e
Showing 1 changed file with 12 additions and 17 deletions.
kernel/sched.c: 29 changes (12 additions, 17 deletions)
@@ -242,6 +242,12 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 }
 #endif
 
+/*
+ * sched_domains_mutex serializes calls to arch_init_sched_domains,
+ * detach_destroy_domains and partition_sched_domains.
+ */
+static DEFINE_MUTEX(sched_domains_mutex);
+
 #ifdef CONFIG_GROUP_SCHED
 
 #include <linux/cgroup.h>
@@ -308,9 +314,6 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
-/* doms_cur_mutex serializes access to doms_cur[] array */
-static DEFINE_MUTEX(doms_cur_mutex);
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -358,21 +361,9 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #endif
 }
 
-static inline void lock_doms_cur(void)
-{
-        mutex_lock(&doms_cur_mutex);
-}
-
-static inline void unlock_doms_cur(void)
-{
-        mutex_unlock(&doms_cur_mutex);
-}
-
 #else
 
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline void lock_doms_cur(void) { }
-static inline void unlock_doms_cur(void) { }
 
 #endif /* CONFIG_GROUP_SCHED */
 
@@ -7822,7 +7813,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 {
        int i, j;
 
-       lock_doms_cur();
+       mutex_lock(&sched_domains_mutex);
 
        /* always unregister in case we don't destroy any domains */
        unregister_sched_domain_sysctl();
@@ -7871,7 +7862,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 
        register_sched_domain_sysctl();
 
-       unlock_doms_cur();
+       mutex_unlock(&sched_domains_mutex);
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -7880,8 +7871,10 @@ int arch_reinit_sched_domains(void)
        int err;
 
        get_online_cpus();
+       mutex_lock(&sched_domains_mutex);
        detach_destroy_domains(&cpu_online_map);
        err = arch_init_sched_domains(&cpu_online_map);
+       mutex_unlock(&sched_domains_mutex);
        put_online_cpus();
 
        return err;
@@ -7999,10 +7992,12 @@ void __init sched_init_smp(void)
        BUG_ON(sched_group_nodes_bycpu == NULL);
 #endif
        get_online_cpus();
+       mutex_lock(&sched_domains_mutex);
        arch_init_sched_domains(&cpu_online_map);
        cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
        if (cpus_empty(non_isolated_cpus))
                cpu_set(smp_processor_id(), non_isolated_cpus);
+       mutex_unlock(&sched_domains_mutex);
        put_online_cpus();
        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);
