sched: convert sched.c from for_each_cpu_mask to for_each_cpu.
Impact: trivial API conversion

This is a simple conversion, but note that for_each_cpu() terminates
with i >= nr_cpu_ids, not i == NR_CPUS like for_each_cpu_mask() did.

I don't convert all of them: sd->span changes in a later patch, so
change those iterators there rather than here.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Rusty Russell authored and Ingo Molnar committed Nov 24, 2008
1 parent ea6f18e commit abcd083
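
Editor's note: a minimal sketch of the semantics being converted below (illustrative only; update_cpu() and cpu_is_interesting() are hypothetical helpers, not kernel APIs). Besides the termination value mentioned in the commit message, for_each_cpu() takes the cpumask by pointer where for_each_cpu_mask_nr() took it by value, which is why each converted call site in the diff gains a '&' or drops a '*'.

cpumask_t mask;
int i;

/* Old iterator: mask passed by value; falls off the end with i == NR_CPUS. */
for_each_cpu_mask_nr(i, mask)
	update_cpu(i);

/* New iterator: mask passed by pointer; falls off the end with i >= nr_cpu_ids. */
for_each_cpu(i, &mask)
	update_cpu(i);

/*
 * Callers that break out early and then test whether a CPU was found
 * must now compare against nr_cpu_ids, not NR_CPUS:
 */
for_each_cpu(i, &mask)
	if (cpu_is_interesting(i))
		break;
if (i >= nr_cpu_ids)
	pr_debug("no matching cpu in mask\n");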
Showing 1 changed file with 18 additions and 18 deletions.
diff --git a/kernel/sched.c b/kernel/sched.c
@@ -2061,7 +2061,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
/* Tally up the load of all CPUs in the group */
avg_load = 0;

- for_each_cpu_mask_nr(i, group->cpumask) {
+ for_each_cpu(i, &group->cpumask) {
/* Bias balancing toward cpus of our domain */
if (local_group)
load = source_load(i, load_idx);
@@ -2103,7 +2103,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
/* Traverse only the allowed CPUs */
cpus_and(*tmp, group->cpumask, p->cpus_allowed);

- for_each_cpu_mask_nr(i, *tmp) {
+ for_each_cpu(i, tmp) {
load = weighted_cpuload(i);

if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3121,7 +3121,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
max_cpu_load = 0;
min_cpu_load = ~0UL;

- for_each_cpu_mask_nr(i, group->cpumask) {
+ for_each_cpu(i, &group->cpumask) {
struct rq *rq;

if (!cpu_isset(i, *cpus))
@@ -3400,7 +3400,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
unsigned long max_load = 0;
int i;

- for_each_cpu_mask_nr(i, group->cpumask) {
+ for_each_cpu(i, &group->cpumask) {
unsigned long wl;

if (!cpu_isset(i, *cpus))
@@ -3942,7 +3942,7 @@ static void run_rebalance_domains(struct softirq_action *h)
int balance_cpu;

cpu_clear(this_cpu, cpus);
- for_each_cpu_mask_nr(balance_cpu, cpus) {
+ for_each_cpu(balance_cpu, &cpus) {
/*
* If this cpu gets work to do, stop the load balancing
* work being done for other cpus. Next load
@@ -6906,7 +6906,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,

cpus_clear(*covered);

- for_each_cpu_mask_nr(i, *span) {
+ for_each_cpu(i, span) {
struct sched_group *sg;
int group = group_fn(i, cpu_map, &sg, tmpmask);
int j;
@@ -6917,7 +6917,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
cpus_clear(sg->cpumask);
sg->__cpu_power = 0;

- for_each_cpu_mask_nr(j, *span) {
+ for_each_cpu(j, span) {
if (group_fn(j, cpu_map, NULL, tmpmask) != group)
continue;

@@ -7117,7 +7117,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
if (!sg)
return;
do {
- for_each_cpu_mask_nr(j, sg->cpumask) {
+ for_each_cpu(j, &sg->cpumask) {
struct sched_domain *sd;

sd = &per_cpu(phys_domains, j);
@@ -7142,7 +7142,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
{
int cpu, i;

- for_each_cpu_mask_nr(cpu, *cpu_map) {
+ for_each_cpu(cpu, cpu_map) {
struct sched_group **sched_group_nodes
= sched_group_nodes_bycpu[cpu];

@@ -7396,7 +7396,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
/*
* Set up domains for cpus specified by the cpu_map.
*/
- for_each_cpu_mask_nr(i, *cpu_map) {
+ for_each_cpu(i, cpu_map) {
struct sched_domain *sd = NULL, *p;
SCHED_CPUMASK_VAR(nodemask, allmasks);

@@ -7463,7 +7463,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,

#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
- for_each_cpu_mask_nr(i, *cpu_map) {
+ for_each_cpu(i, cpu_map) {
SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
SCHED_CPUMASK_VAR(send_covered, allmasks);

@@ -7480,7 +7480,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,

#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
- for_each_cpu_mask_nr(i, *cpu_map) {
+ for_each_cpu(i, cpu_map) {
SCHED_CPUMASK_VAR(this_core_map, allmasks);
SCHED_CPUMASK_VAR(send_covered, allmasks);

@@ -7547,7 +7547,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
goto error;
}
sched_group_nodes[i] = sg;
- for_each_cpu_mask_nr(j, *nodemask) {
+ for_each_cpu(j, nodemask) {
struct sched_domain *sd;

sd = &per_cpu(node_domains, j);
@@ -7593,21 +7593,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,

/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
- for_each_cpu_mask_nr(i, *cpu_map) {
+ for_each_cpu(i, cpu_map) {
struct sched_domain *sd = &per_cpu(cpu_domains, i);

init_sched_groups_power(i, sd);
}
#endif
#ifdef CONFIG_SCHED_MC
- for_each_cpu_mask_nr(i, *cpu_map) {
+ for_each_cpu(i, cpu_map) {
struct sched_domain *sd = &per_cpu(core_domains, i);

init_sched_groups_power(i, sd);
}
#endif

- for_each_cpu_mask_nr(i, *cpu_map) {
+ for_each_cpu(i, cpu_map) {
struct sched_domain *sd = &per_cpu(phys_domains, i);

init_sched_groups_power(i, sd);
Expand All @@ -7627,7 +7627,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
#endif

/* Attach the domains */
- for_each_cpu_mask_nr(i, *cpu_map) {
+ for_each_cpu(i, cpu_map) {
struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
@@ -7709,7 +7709,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
cpumask_t tmpmask;
int i;

- for_each_cpu_mask_nr(i, *cpu_map)
+ for_each_cpu(i, cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
synchronize_sched();
arch_destroy_sched_domains(cpu_map, &tmpmask);
