
Commit

yaml
---
r: 164341
b: refs/heads/master
c: e7d8842
h: refs/heads/master
i:
  164339: 2f7878d
v: v3
Paul E. McKenney authored and Ingo Molnar committed Sep 19, 2009
1 parent c9739b7 commit 6f204e5
Showing 3 changed files with 12 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 28ecd58020409be8eb176c716f957fc3386fa2fa
+refs/heads/master: e7d8842ed34a7fe19d1ed90f84c211fb056ac523
27 changes: 5 additions & 22 deletions trunk/kernel/rcutree.c
@@ -767,10 +767,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 
 /*
  * Record a quiescent state for the specified CPU, which must either be
- * the current CPU or an offline CPU. The lastcomp argument is used to
- * make sure we are still in the grace period of interest. We don't want
- * to end the current grace period based on quiescent states detected in
- * an earlier grace period!
+ * the current CPU. The lastcomp argument is used to make sure we are
+ * still in the grace period of interest. We don't want to end the current
+ * grace period based on quiescent states detected in an earlier grace
+ * period!
  */
 static void
 cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
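
The reworded comment above spells out the contract that the lastcomp argument enforces: a quiescent state may be credited only to the grace period in which it was observed. A minimal userspace sketch of that snapshot-and-compare idea follows; struct gp_state, report_qs(), and the completed field are simplified stand-ins for illustration, not the real rcu_state layout.

#include <stdio.h>

/* Hypothetical, simplified grace-period bookkeeping. */
struct gp_state {
        long completed;         /* number of the most recently completed GP */
};

/* Credit a quiescent state only if no GP has ended since it was seen. */
static void report_qs(struct gp_state *gs, long lastcomp)
{
        if (lastcomp != gs->completed) {
                printf("stale: QS belongs to an earlier grace period\n");
                return;
        }
        printf("ok: QS credited to the current grace period\n");
}

int main(void)
{
        struct gp_state gs = { .completed = 4 };

        report_qs(&gs, 4);      /* snapshot matches: counts */
        gs.completed = 5;       /* a grace period ends in the meantime */
        report_qs(&gs, 4);      /* snapshot is stale: ignored */
        return 0;
}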
@@ -805,7 +805,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
                 * This GP can't end until cpu checks in, so all of our
                 * callbacks can be processed during the next GP.
                 */
-               rdp = rsp->rda[smp_processor_id()];
                rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 
                cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
@@ -881,9 +880,6 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 
        spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
 
-       /* Being offline is a quiescent state, so go record it. */
-       cpu_quiet(cpu, rsp, rdp, lastcomp);
-
        /*
         * Move callbacks from the outgoing CPU to the running CPU.
         * Note that the outgoing CPU is now quiscent, so it is now
@@ -1448,20 +1444,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
                rnp = rnp->parent;
        } while (rnp != NULL && !(rnp->qsmaskinit & mask));
 
-       spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
-
-       /*
-        * A new grace period might start here. If so, we will be part of
-        * it, and its gpnum will be greater than ours, so we will
-        * participate. It is also possible for the gpnum to have been
-        * incremented before this function was called, and the bitmasks
-        * to not be filled out until now, in which case we will also
-        * participate due to our gpnum being behind.
-        */
-
-       /* Since it is coming online, the CPU is in a quiescent state. */
-       cpu_quiet(cpu, rsp, rdp, lastcomp);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
 static void __cpuinit rcu_online_cpu(int cpu)
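Besides removing the cpu_quiet() calls from the offline and init paths, the last hunk folds a bare spin_unlock() followed by local_irq_restore() into a single spin_unlock_irqrestore(). The toy model below only demonstrates that calling convention; the lock and irq "implementations" are printf stand-ins, not the kernel primitives.

#include <stdio.h>

static int irqs_enabled = 1;    /* toy irq state, not real hardware */

static void local_irq_save(unsigned long *flags)
{
        *flags = irqs_enabled;
        irqs_enabled = 0;
        printf("irqs disabled\n");
}

static void local_irq_restore(unsigned long flags)
{
        irqs_enabled = (int)flags;
        printf("irqs restored\n");
}

static void spin_lock(int *lock)   { *lock = 1; printf("lock taken\n"); }
static void spin_unlock(int *lock) { *lock = 0; printf("lock dropped\n"); }

/* Equivalent to spin_unlock() followed by local_irq_restore(). */
static void spin_unlock_irqrestore(int *lock, unsigned long flags)
{
        spin_unlock(lock);
        local_irq_restore(flags);
}

int main(void)
{
        int lock = 0;
        unsigned long flags;

        local_irq_save(&flags); /* the real kernel macro takes flags, not &flags */
        spin_lock(&lock);
        /* ... critical section with irqs off ... */
        spin_unlock_irqrestore(&lock, flags);   /* one call instead of two */
        return 0;
}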
10 changes: 6 additions & 4 deletions trunk/kernel/rcutree_plugin.h
@@ -117,9 +117,9 @@ static void rcu_preempt_note_context_switch(int cpu)
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
-               phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
+               WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
+               phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
                list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
-               smp_mb(); /* Ensure later ctxt swtch seen after above. */
                spin_unlock_irqrestore(&rnp->lock, flags);
        }
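
On the low-order bit, the old and new phase expressions are equivalent: with q = !(rnp->qsmask & rdp->grpmask) in {0, 1}, (gpnum + q) & 0x1 equals q ^ (gpnum & 0x1). The semantic changes in this hunk are therefore the added WARN_ON_ONCE() and the dropped smp_mb(), not the choice of blocked-tasks queue. A quick exhaustive check in plain C:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long gpnum;
        int q;  /* stands in for !(rnp->qsmask & rdp->grpmask) */

        for (gpnum = 0; gpnum < 8; gpnum++) {
                for (q = 0; q <= 1; q++) {
                        int old_phase = q ^ (int)(gpnum & 0x1);
                        int new_phase = (int)((gpnum + q) & 0x1);
                        assert(old_phase == new_phase);
                }
        }
        printf("old and new phase expressions agree\n");
        return 0;
}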

@@ -133,7 +133,9 @@ static void rcu_preempt_note_context_switch(int cpu)
         * means that we continue to block the current grace period.
         */
        rcu_preempt_qs(cpu);
+       local_irq_save(flags);
        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+       local_irq_restore(flags);
 }
 
 /*
@@ -189,10 +191,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
         */
        for (;;) {
                rnp = t->rcu_blocked_node;
-               spin_lock(&rnp->lock);
+               spin_lock(&rnp->lock); /* irqs already disabled. */
                if (rnp == t->rcu_blocked_node)
                        break;
-               spin_unlock(&rnp->lock);
+               spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }
        empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
        list_del_init(&t->rcu_node_entry);
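The loop the new comments annotate is a standard revalidation pattern: t->rcu_blocked_node can be retargeted while the task holds no lock, so the code locks whatever node the pointer currently names, re-reads the pointer under that lock, and retries on a mismatch. A standalone pthreads sketch of the same pattern, with illustrative names rather than the kernel's:

#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t lock;
};

struct task {
        struct node *blocked_node;      /* may be retargeted concurrently */
};

/* Lock the node the task is blocked on, revalidating after acquisition. */
static struct node *lock_blocked_node(struct task *t)
{
        struct node *np;

        for (;;) {
                np = t->blocked_node;
                pthread_mutex_lock(&np->lock);
                if (np == t->blocked_node)
                        return np;      /* pointer still valid under the lock */
                pthread_mutex_unlock(&np->lock);        /* raced; retry */
        }
}

int main(void)
{
        struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct task t = { .blocked_node = &n };
        struct node *np = lock_blocked_node(&t);

        printf("locked the node the task is blocked on\n");
        pthread_mutex_unlock(&np->lock);
        return 0;
}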
