Commit 3ff9771

---
r: 158170
b: refs/heads/master
c: 8684896
h: refs/heads/master
v: v3

Paul E. McKenney authored and Ingo Molnar committed Aug 29, 2009
1 parent e18804c commit 3ff9771
Showing 5 changed files with 18 additions and 13 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: dd5d19bafd90d33043a4a14b2e2d98612caa293c
+refs/heads/master: 868489660dabc0c28087cca3dbc1adbbc398c6fe
4 changes: 3 additions & 1 deletion trunk/include/linux/sched.h
@@ -1163,6 +1163,8 @@ struct sched_rt_entity {
 #endif
 };
 
+struct rcu_node;
+
 struct task_struct {
         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
         void *stack;
@@ -1208,7 +1210,7 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
         int rcu_read_lock_nesting;
         char rcu_read_unlock_special;
-        void *rcu_blocked_node;
+        struct rcu_node *rcu_blocked_node;
         struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
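The change above replaces an untyped void * with a pointer to the forward-declared struct rcu_node, so the compiler can type-check assignments to rcu_blocked_node without sched.h having to pull in the full rcu_node definition from rcutree.h. A minimal userspace sketch of the same forward-declaration pattern (names here are illustrative, not kernel code):

    #include <stdio.h>

    struct rcu_node;                    /* forward declaration: layout unknown here */

    struct task_like {
            struct rcu_node *rcu_blocked_node;  /* typed pointer: no casts needed */
    };

    /* The real definition lives elsewhere (rcutree.h in the kernel);
     * it appears here only so the demo compiles and runs. */
    struct rcu_node {
            long gpnum;
    };

    int main(void)
    {
            struct rcu_node node = { .gpnum = 42 };
            struct task_like t = { .rcu_blocked_node = &node };

            /* With void * this dereference would need a cast, and the
             * compiler could not catch a wrong-type assignment. */
            printf("gpnum = %ld\n", t.rcu_blocked_node->gpnum);
            return 0;
    }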
13 changes: 6 additions & 7 deletions trunk/kernel/rcutree.c
@@ -229,7 +229,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 #endif /* #ifdef CONFIG_SMP */
 
 #ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
 
 /**
  * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -249,7 +248,7 @@ void rcu_enter_nohz(void)
         rdtp = &__get_cpu_var(rcu_dynticks);
         rdtp->dynticks++;
         rdtp->dynticks_nesting--;
-        WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+        WARN_ON_ONCE(rdtp->dynticks & 0x1);
         local_irq_restore(flags);
 }
 
@@ -268,7 +267,7 @@ void rcu_exit_nohz(void)
         rdtp = &__get_cpu_var(rcu_dynticks);
         rdtp->dynticks++;
         rdtp->dynticks_nesting++;
-        WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
         local_irq_restore(flags);
         smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
@@ -287,7 +286,7 @@ void rcu_nmi_enter(void)
         if (rdtp->dynticks & 0x1)
                 return;
         rdtp->dynticks_nmi++;
-        WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+        WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
         smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -306,7 +305,7 @@ void rcu_nmi_exit(void)
                 return;
         smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
         rdtp->dynticks_nmi++;
-        WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+        WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
 }
 
 /**
@@ -322,7 +321,7 @@ void rcu_irq_enter(void)
         if (rdtp->dynticks_nesting++)
                 return;
         rdtp->dynticks++;
-        WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
         smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
@@ -341,7 +340,7 @@ void rcu_irq_exit(void)
                 return;
         smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
         rdtp->dynticks++;
-        WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+        WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
         /* If the interrupt queued a callback, get out of dyntick mode. */
         if (__get_cpu_var(rcu_sched_data).nxtlist ||
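Each of the six WARN_ON_RATELIMIT() calls above becomes WARN_ON_ONCE(), which also lets the rcu_rs ratelimit state be deleted: rather than permitting bursts of up to 5 warnings every 10*HZ, each call site now warns at most once. A rough userspace sketch of the warn-once behavior (not the kernel's actual macro, which also dumps a backtrace):

    #include <stdio.h>

    /* The static flag is distinct per macro expansion, so each call
     * site fires at most one warning, like WARN_ON_ONCE. Uses a
     * GCC/Clang statement expression, as the kernel does. */
    #define WARN_ON_ONCE_SKETCH(cond) ({                            \
            static int warned;                                      \
            int ret = !!(cond);                                     \
            if (ret && !warned) {                                   \
                    warned = 1;                                     \
                    fprintf(stderr, "warning: %s at %s:%d\n",       \
                            #cond, __FILE__, __LINE__);             \
            }                                                       \
            ret;                                                    \
    })

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    WARN_ON_ONCE_SKETCH(i >= 0);  /* prints once, not three times */
            return 0;
    }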
2 changes: 2 additions & 0 deletions trunk/kernel/rcutree.h
@@ -81,6 +81,8 @@ struct rcu_dynticks {
 struct rcu_node {
         spinlock_t lock;
         long gpnum;             /* Current grace period for this node. */
+                                /*  This will either be equal to or one */
+                                /*  behind the root rcu_node's gpnum. */
         unsigned long qsmask;   /* CPUs or groups that need to switch in */
                                 /*  order for current grace period to proceed.*/
         unsigned long qsmaskinit;
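The new comment records an invariant of the rcu_node tree: a node's gpnum either equals the root rcu_node's gpnum or is exactly one behind it, since a new grace-period number is assigned at the root and then propagates down the tree. A hypothetical assertion expressing that invariant (illustrative only, not code from this commit):

    #include <assert.h>

    struct rcu_node_like {
            long gpnum;     /* current grace period for this node */
    };

    /* Equal to the root's gpnum, or exactly one behind it. */
    static void check_gpnum(const struct rcu_node_like *rnp,
                            const struct rcu_node_like *root)
    {
            assert(rnp->gpnum == root->gpnum || rnp->gpnum + 1 == root->gpnum);
    }

    int main(void)
    {
            struct rcu_node_like root = { .gpnum = 7 };
            struct rcu_node_like leaf = { .gpnum = 6 };     /* lagging by one: OK */

            check_gpnum(&leaf, &root);
            leaf.gpnum = 7;                                 /* caught up: also OK */
            check_gpnum(&leaf, &root);
            return 0;
    }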
10 changes: 6 additions & 4 deletions trunk/kernel/rcutree_plugin.h
@@ -92,7 +92,7 @@ static void rcu_preempt_qs(int cpu)
         rnp = rdp->mynode;
         spin_lock(&rnp->lock);
         t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
-        t->rcu_blocked_node = (void *)rnp;
+        t->rcu_blocked_node = rnp;
 
         /*
          * If this CPU has already checked in, then this task
@@ -176,9 +176,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
                  * most one time.  So at most two passes through loop.
                  */
                 for (;;) {
-                        rnp = (struct rcu_node *)t->rcu_blocked_node;
+                        rnp = t->rcu_blocked_node;
                         spin_lock(&rnp->lock);
-                        if (rnp == (struct rcu_node *)t->rcu_blocked_node)
+                        if (rnp == t->rcu_blocked_node)
                                 break;
                         spin_unlock(&rnp->lock);
                 }
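With the casts gone, the retry loop above reads as what it is: a load/lock/re-check pattern. The task may be moved to a different rcu_node while the lock is being acquired, so the code loads t->rcu_blocked_node, locks that node, and re-checks that the pointer still matches before proceeding; since the pointer can change at most once here, at most two passes are needed. A userspace sketch of the pattern using pthreads (names are illustrative, not kernel API):

    #include <pthread.h>

    struct node {
            pthread_mutex_t lock;
    };

    struct task {
            struct node *blocked_node;      /* may be updated by another thread */
    };

    /* Return with the node that t->blocked_node stably points to locked.
     * If the pointer changed between the load and the lock, drop the
     * lock and retry. (A real concurrent version would also need an
     * atomic or volatile load of blocked_node.) */
    static struct node *lock_blocked_node(struct task *t)
    {
            struct node *np;

            for (;;) {
                    np = t->blocked_node;
                    pthread_mutex_lock(&np->lock);
                    if (np == t->blocked_node)
                            return np;      /* still current: lock is held */
                    pthread_mutex_unlock(&np->lock);
            }
    }

    int main(void)
    {
            struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };
            struct task t = { .blocked_node = &n };

            struct node *locked = lock_blocked_node(&t);
            pthread_mutex_unlock(&locked->lock);
            return 0;
    }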
@@ -288,8 +288,10 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
         struct rcu_node *rnp_root = rcu_get_root(rsp);
         struct task_struct *tp;
 
-        if (rnp == rnp_root)
+        if (rnp == rnp_root) {
+                WARN_ONCE(1, "Last CPU thought to be offlined?");
                 return;  /* Shouldn't happen: at least one CPU online. */
+        }
 
         /*
          * Move tasks up to root rcu_node.  Rely on the fact that the
