diff --git a/[refs] b/[refs]
index ffed0c097233..839b11da7d38 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 35cf4e50b16331def6cfcbee11e49270b6db07f5
+refs/heads/master: e221d028bb08b47e624c5f0a31732c642db9d19a
diff --git a/trunk/kernel/sched/rt.c b/trunk/kernel/sched/rt.c
index 573e1ca01102..944cb68420e9 100644
--- a/trunk/kernel/sched/rt.c
+++ b/trunk/kernel/sched/rt.c
@@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	const struct cpumask *span;
 
 	span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * FIXME: isolated CPUs should really leave the root task group,
+	 * whether they are isolcpus or were isolated via cpusets, lest
+	 * the timer run on a CPU which does not service all runqueues,
+	 * potentially leaving other CPUs indefinitely throttled. If
+	 * isolation is really required, the user will turn the throttle
+	 * off to kill the perturbations it causes anyway. Meanwhile,
+	 * this maintains functionality for boot and/or troubleshooting.
+	 */
+	if (rt_b == &root_task_group.rt_bandwidth)
+		span = cpu_online_mask;
+#endif
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
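
The patch works because do_sched_rt_period_timer() only unthrottles the RT runqueues of CPUs covered by `span`; if isolated CPUs fall outside that mask, their runqueues are never replenished and stay throttled indefinitely. The hunk limits the widening to the root task group (`rt_b == &root_task_group.rt_bandwidth`), so child groups keep using their normal period mask. Below is a minimal user-space sketch of that failure mode and of the effect of widening the mask; it is not kernel code, and the names (cpu_span, rt_throttled, do_period_timer, NCPUS) are illustrative stand-ins rather than real scheduler symbols.

/*
 * Sketch: a period "timer" pass only unthrottles CPUs present in the
 * span it is given. A CPU excluded from the span (here CPU 3, standing
 * in for an isolated CPU) remains throttled until a pass runs over a
 * mask that includes it, which is what the patch arranges for the root
 * task group by falling back to cpu_online_mask.
 */
#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4

static bool cpu_span[NCPUS]     = { true, true, true, false }; /* CPU 3 "isolated" */
static bool rt_throttled[NCPUS] = { true, true, true, true };  /* all start throttled */

/* Stand-in for the unthrottle loop in do_sched_rt_period_timer(). */
static void do_period_timer(const bool *span)
{
	for (int i = 0; i < NCPUS; i++)
		if (span[i])
			rt_throttled[i] = false;
}

int main(void)
{
	/* Pass over the restricted span: CPU 3 never gets unthrottled. */
	do_period_timer(cpu_span);
	for (int i = 0; i < NCPUS; i++)
		printf("cpu%d: %s\n", i,
		       rt_throttled[i] ? "still throttled" : "unthrottled");

	/* The patch's effect for the root group: use the full online mask. */
	bool online_mask[NCPUS] = { true, true, true, true };
	do_period_timer(online_mask);
	printf("after online-mask pass, cpu3 is %s\n",
	       rt_throttled[3] ? "still throttled" : "unthrottled");
	return 0;
}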