diff --git a/[refs] b/[refs]
index c7cbe949c77e..692f38983e9f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d14aada8e20bdf81ffd43f433b123972cf575b32
+refs/heads/master: 77e38ed347162423c6b72e06c865a121081c2bb6
diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h
index 0ae2339ab04d..9b18227e86e8 100644
--- a/trunk/kernel/rcutree_plugin.h
+++ b/trunk/kernel/rcutree_plugin.h
@@ -1051,6 +1051,8 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 int rcu_needs_cpu(int cpu)
 {
 	int c = 0;
+	int snap;
+	int snap_nmi;
 	int thatcpu;
 
 	/* Check for being in the holdoff period. */
@@ -1058,12 +1060,18 @@ int rcu_needs_cpu(int cpu)
 		return rcu_needs_cpu_quick_check(cpu);
 
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
-	for_each_cpu_not(thatcpu, nohz_cpu_mask)
-		if (cpu_online(thatcpu) && thatcpu != cpu) {
+	for_each_online_cpu(thatcpu) {
+		if (thatcpu == cpu)
+			continue;
+		snap = per_cpu(rcu_dynticks, thatcpu)->dynticks;
+		snap_nmi = per_cpu(rcu_dynticks, thatcpu)->dynticks_nmi;
+		smp_mb(); /* Order sampling of snap with end of grace period. */
+		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
 			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 			return rcu_needs_cpu_quick_check(cpu);
 		}
+	}
 
 	/* Check and update the rcu_dyntick_drain sequencing. */
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
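
Note on the hunk above: instead of scanning nohz_cpu_mask, rcu_needs_cpu() now samples each other online CPU's dynticks and dynticks_nmi counters directly. These counters are incremented on every transition into and out of dyntick-idle (and NMI) state, so an odd snapshot means that CPU is currently non-idle, and this CPU therefore cannot be the last non-dyntick-idle CPU. The following is a minimal userspace sketch of that odd/even counter convention only, not kernel code; the names cpu_state, cpu_is_nonidle, and last_nonidle_cpu are hypothetical illustrations, and the real kernel path additionally orders the samples with smp_mb() as shown in the patch.

#include <stdio.h>

struct cpu_state {
	int dynticks;      /* odd => CPU not in dyntick-idle mode */
	int dynticks_nmi;  /* odd => CPU currently in an NMI handler */
};

/* A CPU counts as non-idle if either counter snapshot is odd. */
static int cpu_is_nonidle(const struct cpu_state *s)
{
	return ((s->dynticks & 0x1) != 0) || ((s->dynticks_nmi & 0x1) != 0);
}

/* Return 1 if CPU "me" is the last non-dyntick-idle CPU, else 0. */
static int last_nonidle_cpu(const struct cpu_state *cpus, int ncpus, int me)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (i == me)
			continue;
		if (cpu_is_nonidle(&cpus[i]))
			return 0;	/* some other CPU is still non-idle */
	}
	return 1;
}

int main(void)
{
	/* CPU 0 has an odd dynticks value, so it is non-idle. */
	struct cpu_state cpus[4] = { { 1, 0 }, { 2, 0 }, { 4, 2 }, { 6, 0 } };

	printf("%d\n", last_nonidle_cpu(cpus, 4, 3));	/* prints 0 */
	return 0;
}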