Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 182255
b: refs/heads/master
c: 8bd93a2
h: refs/heads/master
i:
  182253: 0c08e2a
  182251: ac9bbc0
  182247: c81168c
  182239: d7b7cd3
v: v3
  • Loading branch information
Paul E. McKenney authored and Ingo Molnar committed Feb 25, 2010
1 parent 566a7cb commit 3ef8b90
Show file tree
Hide file tree
Showing 5 changed files with 102 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 998f2ac3fea93bfa8b55c279fff68f7c5b9ab93d
refs/heads/master: 8bd93a2c5d4cab2ae17d06350daa7dbf546a4634
14 changes: 14 additions & 0 deletions trunk/include/linux/cpumask.h
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,

/*
 * Uniprocessor (nr_cpu_ids == 1) stubs: each iterator visits CPU 0
 * exactly once, regardless of the mask contents.  The (void) casts
 * evaluate the mask arguments once to suppress "unused variable"
 * warnings without dereferencing them.
 */
#define for_each_cpu(cpu, mask) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
/* NOTE(review): visits CPU 0 unconditionally, even if CPU 0 is set in
 * @mask -- mirrors the for_each_cpu() stub convention above. */
#define for_each_cpu_not(cpu, mask) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_and(cpu, mask, and) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
#else
Expand Down Expand Up @@ -202,6 +204,18 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
(cpu) = cpumask_next((cpu), (mask)), \
(cpu) < nr_cpu_ids;)

/**
 * for_each_cpu_not - iterate over every cpu in a complemented mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * Visits each CPU number that is *clear* in @mask, in ascending order,
 * by repeatedly calling cpumask_next_zero().  Starting @cpu at -1 makes
 * the first probe begin at bit 0.
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_not(cpu, mask)				\
	for ((cpu) = -1;					\
		(cpu) = cpumask_next_zero((cpu), (mask)),	\
		(cpu) < nr_cpu_ids;)

/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
Expand Down
16 changes: 16 additions & 0 deletions trunk/init/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -396,6 +396,22 @@ config RCU_FANOUT_EXACT

Say N if unsure.

config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
depends on TREE_RCU && NO_HZ && SMP
default n
help
This option causes RCU to attempt to accelerate grace periods
in order to allow the final CPU to enter dynticks-idle state
more quickly. On the other hand, this option increases the
overhead of the dynticks-idle checking, particularly on systems
with large numbers of CPUs.

Say Y if energy efficiency is critically important, particularly
if you have relatively few CPUs.

Say N if you are unsure.

config TREE_RCU_TRACE
def_bool RCU_TRACE && ( TREE_RCU || TREE_PREEMPT_RCU )
select DEBUG_FS
Expand Down
5 changes: 2 additions & 3 deletions trunk/kernel/rcutree.c
Original file line number Diff line number Diff line change
Expand Up @@ -1550,10 +1550,9 @@ static int rcu_pending(int cpu)
/*
* Check to see if any future RCU-related work will need to be done
* by the current CPU, even if none need be done immediately, returning
* 1 if so. This function is part of the RCU implementation; it is -not-
* an exported member of the RCU API.
* 1 if so.
*/
int rcu_needs_cpu(int cpu)
static int rcu_needs_cpu_quick_check(int cpu)
{
/* RCU callbacks either ready or pending? */
return per_cpu(rcu_sched_data, cpu).nxtlist ||
Expand Down
69 changes: 69 additions & 0 deletions trunk/kernel/rcutree_plugin.h
Original file line number Diff line number Diff line change
Expand Up @@ -906,3 +906,72 @@ static void __init __rcu_init_preempt(void)
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	/* CONFIG_RCU_FAST_NO_HZ=n: no grace-period acceleration here;
	 * just report whether this CPU has callbacks ready or pending. */
	return rcu_needs_cpu_quick_check(cpu);
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/* Maximum number of flush passes per call attempting to drain this
 * CPU's callback lists before giving up for this invocation. */
#define RCU_NEEDS_CPU_FLUSHES 5

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode.  This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 1;	/* Nonzero while callbacks may remain pending. */
	int i;
	int thatcpu;

	/* Don't bother unless we are the last non-dyntick-idle CPU.
	 * for_each_cpu_not() over nohz_cpu_mask walks the CPUs that are
	 * NOT in nohz (dyntick-idle) mode; seeing any such CPU other
	 * than ourselves means another CPU is still awake to drive grace
	 * periods, so fall back to the cheap check.
	 * NOTE(review): assumes nohz_cpu_mask accurately tracks
	 * dyntick-idle CPUs at this point -- confirm against the
	 * tick/nohz code that maintains it. */
	for_each_cpu_not(thatcpu, nohz_cpu_mask)
		if (thatcpu != cpu)
			return rcu_needs_cpu_quick_check(cpu);

	/* Try to push remaining RCU-sched and RCU-bh callbacks through.
	 * Each pass: note a quiescent state for this CPU, kick the
	 * grace-period machinery, then reprocess this CPU's callbacks;
	 * c is recomputed from the nxtlist heads so the loop stops early
	 * once both flavors have drained. */
	for (i = 0; i < RCU_NEEDS_CPU_FLUSHES && c; i++) {
		c = 0;
		if (per_cpu(rcu_sched_data, cpu).nxtlist) {
			rcu_sched_qs(cpu);
			force_quiescent_state(&rcu_sched_state, 0);
			__rcu_process_callbacks(&rcu_sched_state,
						&per_cpu(rcu_sched_data, cpu));
			/* Still nonempty? Another pass (or tick) is needed. */
			c = !!per_cpu(rcu_sched_data, cpu).nxtlist;
		}
		if (per_cpu(rcu_bh_data, cpu).nxtlist) {
			rcu_bh_qs(cpu);
			force_quiescent_state(&rcu_bh_state, 0);
			__rcu_process_callbacks(&rcu_bh_state,
						&per_cpu(rcu_bh_data, cpu));
			c = !!per_cpu(rcu_bh_data, cpu).nxtlist;
		}
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	return c;
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

0 comments on commit 3ef8b90

Please sign in to comment.