Commit 2e16037

---
r: 252679
b: refs/heads/master
c: 08bca60
h: refs/heads/master
i:
  252677: 708db77
  252675: 09bf03d
  252671: 313509f
v: v3
Peter Zijlstra authored and Ingo Molnar committed May 28, 2011
1 parent 00a40ca commit 2e16037
Showing 4 changed files with 18 additions and 40 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8826f3b0397562eee6f8785d548be9dfdb169100
+refs/heads/master: 08bca60a6912ad225254250c0a9c3a05b4152cfa
trunk/kernel/rcutree.c: 23 changes (6 additions, 17 deletions)
@@ -95,7 +95,6 @@ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;

@@ -1476,7 +1475,7 @@ static void invoke_rcu_cpu_kthread(void)
                 local_irq_restore(flags);
                 return;
         }
-        wake_up(&__get_cpu_var(rcu_cpu_wq));
+        wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
         local_irq_restore(flags);
 }

@@ -1596,14 +1595,12 @@ static int rcu_cpu_kthread(void *arg)
         unsigned long flags;
         int spincnt = 0;
         unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-        wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
         char work;
         char *workp = &per_cpu(rcu_cpu_has_work, cpu);

         for (;;) {
                 *statusp = RCU_KTHREAD_WAITING;
-                wait_event_interruptible(*wqp,
-                                         *workp != 0 || kthread_should_stop());
+                rcu_wait(*workp != 0 || kthread_should_stop());
                 local_bh_disable();
                 if (rcu_cpu_kthread_should_stop(cpu)) {
                         local_bh_enable();

@@ -1654,7 +1651,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
         per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
         WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
         per_cpu(rcu_cpu_kthread_task, cpu) = t;
-        wake_up_process(t);
         sp.sched_priority = RCU_KTHREAD_PRIO;
         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
         return 0;

@@ -1677,8 +1673,7 @@ static int rcu_node_kthread(void *arg)

         for (;;) {
                 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-                wait_event_interruptible(rnp->node_wq,
-                                         atomic_read(&rnp->wakemask) != 0);
+                rcu_wait(atomic_read(&rnp->wakemask) != 0);
                 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 mask = atomic_xchg(&rnp->wakemask, 0);

@@ -1762,7 +1757,6 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 rnp->node_kthread_task = t;
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                wake_up_process(t);
                 sp.sched_priority = 99;
                 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
         }

@@ -1779,21 +1773,16 @@ static int __init rcu_spawn_kthreads(void)

         rcu_kthreads_spawnable = 1;
         for_each_possible_cpu(cpu) {
-                init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
                 per_cpu(rcu_cpu_has_work, cpu) = 0;
                 if (cpu_online(cpu))
                         (void)rcu_spawn_one_cpu_kthread(cpu);
         }
         rnp = rcu_get_root(rcu_state);
-        init_waitqueue_head(&rnp->node_wq);
-        rcu_init_boost_waitqueue(rnp);
         (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-        if (NUM_RCU_NODES > 1)
-                rcu_for_each_leaf_node(rcu_state, rnp) {
-                        init_waitqueue_head(&rnp->node_wq);
-                        rcu_init_boost_waitqueue(rnp);
+        if (NUM_RCU_NODES > 1) {
+                rcu_for_each_leaf_node(rcu_state, rnp)
                         (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-                }
+        }
         return 0;
 }
 early_initcall(rcu_spawn_kthreads);
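Taken together, the rcutree.c hunks above swap the per-CPU wait queue for direct task wakeups: rcu_spawn_one_cpu_kthread() records the kthread's task_struct (and no longer wakes it at creation), and invoke_rcu_cpu_kthread() wakes that task with wake_up_process() instead of wake_up() on rcu_cpu_wq. A minimal waker-side sketch of the same pattern, using hypothetical names (worker_task, worker_has_work, kick_this_cpu_worker) rather than the RCU code itself:

#include <linux/percpu.h>
#include <linux/sched.h>

/* Hypothetical per-CPU state, analogous to rcu_cpu_kthread_task and
 * rcu_cpu_has_work in the patch above. */
static DEFINE_PER_CPU(struct task_struct *, worker_task);
static DEFINE_PER_CPU(char, worker_has_work);

/* Mark work pending for this CPU, then wake its kthread by task pointer.
 * No wait_queue_head_t is involved: wake_up_process() makes a sleeping
 * task runnable and is harmless if the task is already running. */
static void kick_this_cpu_worker(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __this_cpu_write(worker_has_work, 1);
        if (__this_cpu_read(worker_task) != NULL)
                wake_up_process(__this_cpu_read(worker_task));
        local_irq_restore(flags);
}

Because wake_up_process() on an already-running task is a no-op, the waker never needs to know whether the kthread is currently sleeping.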
trunk/kernel/rcutree.h: 17 changes (10 additions, 7 deletions)
@@ -159,9 +159,6 @@ struct rcu_node {
         struct task_struct *boost_kthread_task;
                                 /* kthread that takes care of priority */
                                 /*  boosting for this rcu_node structure. */
-        wait_queue_head_t boost_wq;
-                                /* Wait queue on which to park the boost */
-                                /*  kthread. */
         unsigned int boost_kthread_status;
                                 /* State of boost_kthread_task for tracing. */
         unsigned long n_tasks_boosted;

@@ -188,9 +185,6 @@ struct rcu_node {
                                 /* kthread that takes care of this rcu_node */
                                 /*  structure, for example, awakening the */
                                 /*  per-CPU kthreads as needed. */
-        wait_queue_head_t node_wq;
-                                /* Wait queue on which to park the per-node */
-                                /*  kthread. */
         unsigned int node_kthread_status;
                                 /* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;

@@ -336,6 +330,16 @@ struct rcu_data {
                                 /* scheduling clock irq */
                                 /*  before ratting on them. */

+#define rcu_wait(cond)                                          \
+do {                                                            \
+        for (;;) {                                              \
+                set_current_state(TASK_INTERRUPTIBLE);          \
+                if (cond)                                       \
+                        break;                                  \
+                schedule();                                     \
+        }                                                       \
+        __set_current_state(TASK_RUNNING);                      \
+} while (0)
+
 /*
  * RCU global state, including node hierarchy.  This hierarchy is

@@ -445,7 +449,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                           cpumask_var_t cm);
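The rcu_wait() macro added to rcutree.h is the sleep side of the new wake_up_process() handshake: it sets the task state to TASK_INTERRUPTIBLE before re-testing the condition, so a wakeup that races with the test simply puts the task back to TASK_RUNNING and the following schedule() returns without blocking. A self-contained sketch of a waiter/waker pair built on the same open-coded pattern, with hypothetical names (worker_fn, kick_worker) and a plain int flag in place of the per-CPU state the patch uses, assuming a kernel-module context:

#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *worker;      /* set once via kthread_run() below */
static int work_pending;                /* kept simple for the sketch */

/* Waiter: the same open-coded sleep as rcu_wait().  Setting the task
 * state before re-checking the condition means a concurrent
 * wake_up_process() cannot be lost between the check and schedule(). */
static int worker_fn(void *unused)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!work_pending && !kthread_should_stop()) {
                        schedule();
                        continue;
                }
                __set_current_state(TASK_RUNNING);
                work_pending = 0;
                /* ... process the work here ... */
        }
        return 0;
}

/* Waker: publish the condition, then wake the task directly, mirroring
 * invoke_rcu_cpu_kthread() after this patch. */
static void kick_worker(void)
{
        work_pending = 1;
        wake_up_process(worker);
}

The thread would be started once with worker = kthread_run(worker_fn, NULL, "worker"); after that, each kick_worker() either wakes it or is absorbed by the next pass of the loop, which finds work_pending already set.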
trunk/kernel/rcutree_plugin.h: 16 changes (1 addition, 15 deletions)
@@ -1196,8 +1196,7 @@ static int rcu_boost_kthread(void *arg)

         for (;;) {
                 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-                wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-                                                        rnp->exp_tasks);
+                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
                 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                 more2boost = rcu_boost(rnp);
                 if (more2boost)

@@ -1274,14 +1273,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
         rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
 }

-/*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-        init_waitqueue_head(&rnp->boost_wq);
-}
-
 /*
  * Create an RCU-boost kthread for the specified node if one does not
  * already exist.  We only create this kthread for preemptible RCU.

@@ -1306,7 +1297,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
         raw_spin_lock_irqsave(&rnp->lock, flags);
         rnp->boost_kthread_task = t;
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
-        wake_up_process(t);
         sp.sched_priority = RCU_KTHREAD_PRIO;
         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
         return 0;

@@ -1328,10 +1318,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }

-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-}
-
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                              struct rcu_node *rnp,
                                              int rnp_index)
