Commit 8bcc7b3

---
r: 105205
b: refs/heads/master
c: 363ab6f
h: refs/heads/master
i:
  105203: e07b8e4
v: v3
Mike Travis authored and Thomas Gleixner committed May 23, 2008
1 parent cabbd08 commit 8bcc7b3
Showing 9 changed files with 35 additions and 35 deletions.
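
Every kernel hunk below makes the same mechanical substitution: for_each_cpu_mask() becomes for_each_cpu_mask_nr(). The difference between the two iterators is the loop bound: the classic variant scans the cpumask bitmap up to the compile-time constant NR_CPUS, while the _nr variant stops at nr_cpu_ids, the number of possible CPU ids detected at boot. On a kernel built with a large NR_CPUS (e.g. 4096) running on a small machine, this avoids walking guaranteed-empty bits on every iteration. A minimal sketch of the distinction, in the spirit of the include/linux/cpumask.h of this era (the helper names and expansion are simplified here, not quoted from the header):

	/* Classic variant: the loop bound is the compile-time NR_CPUS. */
	#define for_each_cpu_mask(cpu, mask)			\
		for ((cpu) = first_cpu(mask);			\
		     (cpu) < NR_CPUS;				\
		     (cpu) = next_cpu((cpu), (mask)))

	/* Performance variant: the loop bound is nr_cpu_ids, the highest
	 * possible CPU id + 1 as detected at boot, typically far smaller
	 * than NR_CPUS on kernels configured for very large machines. */
	#define for_each_cpu_mask_nr(cpu, mask)			\
		for ((cpu) = first_cpu_nr(mask);		\
		     (cpu) < nr_cpu_ids;			\
		     (cpu) = next_cpu_nr((cpu), (mask)))
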
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 068b12772a64c2440ef2f64ac5d780688c06576f
+refs/heads/master: 363ab6f1424cdea63e5d182312d60e19077b892a
2 changes: 1 addition & 1 deletion trunk/kernel/cpu.c
@@ -390,7 +390,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
2 changes: 1 addition & 1 deletion trunk/kernel/rcuclassic.c
@@ -92,7 +92,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		 */
 		cpumask = rcp->cpumask;
 		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask(cpu, cpumask)
+		for_each_cpu_mask_nr(cpu, cpumask)
 			smp_send_reschedule(cpu);
 	}
 }
10 changes: 5 additions & 5 deletions trunk/kernel/rcupreempt.c
@@ -657,7 +657,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -675,7 +675,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -707,7 +707,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -722,7 +722,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb(); /* ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -742,7 +742,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
36 changes: 18 additions & 18 deletions trunk/kernel/sched.c
@@ -2271,7 +2271,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2313,7 +2313,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3296,7 +3296,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3560,7 +3560,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -4100,7 +4100,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6832,7 +6832,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6843,7 +6843,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -7043,7 +7043,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -7068,7 +7068,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7374,7 +7374,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7391,7 +7391,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7458,7 +7458,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7504,21 +7504,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7538,7 +7538,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7621,7 +7621,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
2 changes: 1 addition & 1 deletion trunk/kernel/sched_fair.c
@@ -1022,7 +1022,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
6 changes: 3 additions & 3 deletions trunk/kernel/sched_rt.c
@@ -231,7 +231,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu_mask_nr(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -272,7 +272,7 @@ static int balance_runtime(struct rt_rq *rt_rq)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask(i, rd->span) {
+	for_each_cpu_mask_nr(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -1000,7 +1000,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
4 changes: 2 additions & 2 deletions trunk/kernel/taskstats.c
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 		return -EINVAL;
 
 	if (isadd == REGISTER) {
-		for_each_cpu_mask(cpu, mask) {
+		for_each_cpu_mask_nr(cpu, mask) {
 			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
 					 cpu_to_node(cpu));
 			if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 
 	/* Deregister or cleanup */
 cleanup:
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		listeners = &per_cpu(listener_array, cpu);
 		down_write(&listeners->sem);
 		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
6 changes: 3 additions & 3 deletions trunk/kernel/workqueue.c
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	put_online_cpus();
 
