Commit 21449da
---
yaml
---
r: 8353
b: refs/heads/master
c: 5969fe0
h: refs/heads/master
i:
  8351: 684e7c4
v: v3
Nick Piggin authored and Linus Torvalds committed Sep 10, 2005
1 parent ba171f7 commit 21449da
Showing 2 changed files with 29 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e17224bf1d01b461ec02a60f5a9b7657a89bdd23
+refs/heads/master: 5969fe0618051e8577316555a81a6e44b7b7d640
34 changes: 28 additions & 6 deletions trunk/kernel/sched.c
@@ -1906,7 +1906,7 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum idle_type idle)
+		   unsigned long *imbalance, enum idle_type idle, int *sd_idle)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -1931,6 +1931,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		avg_load = 0;
 
 		for_each_cpu_mask(i, group->cpumask) {
+			if (*sd_idle && !idle_cpu(i))
+				*sd_idle = 0;
+
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = target_load(i, load_idx);
@@ -2074,10 +2077,14 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	unsigned long imbalance;
 	int nr_moved, all_pinned = 0;
 	int active_balance = 0;
+	int sd_idle = 0;
+
+	if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+		sd_idle = 1;
 
 	schedstat_inc(sd, lb_cnt[idle]);
 
-	group = find_busiest_group(sd, this_cpu, &imbalance, idle);
+	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
 	if (!group) {
 		schedstat_inc(sd, lb_nobusyg[idle]);
 		goto out_balanced;
@@ -2150,6 +2157,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		sd->balance_interval *= 2;
 	}
 
+	if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+		return -1;
 	return nr_moved;
 
 out_balanced:
@@ -2161,6 +2170,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	    (sd->balance_interval < sd->max_interval))
 		sd->balance_interval *= 2;
 
+	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+		return -1;
 	return 0;
 }
 
@@ -2178,9 +2189,13 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 	runqueue_t *busiest = NULL;
 	unsigned long imbalance;
 	int nr_moved = 0;
+	int sd_idle = 0;
+
+	if (sd->flags & SD_SHARE_CPUPOWER)
+		sd_idle = 1;
 
 	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
-	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
+	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
 	if (!group) {
 		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
 		goto out_balanced;
@@ -2205,15 +2220,19 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 		spin_unlock(&busiest->lock);
 	}
 
-	if (!nr_moved)
+	if (!nr_moved) {
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
-	else
+		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+			return -1;
+	} else
 		sd->nr_balance_failed = 0;
 
 	return nr_moved;
 
 out_balanced:
 	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+		return -1;
 	sd->nr_balance_failed = 0;
 	return 0;
 }
@@ -2338,7 +2357,10 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
 
 	if (j - sd->last_balance >= interval) {
 		if (load_balance(this_cpu, this_rq, sd, idle)) {
-			/* We've pulled tasks over so no longer idle */
+			/* We've pulled tasks over so either we're no
+			 * longer idle, or one of our SMT siblings is
+			 * not idle.
+			 */
 			idle = NOT_IDLE;
 		}
 		sd->last_balance += interval;
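Taken together, the patch threads an sd_idle flag through the balancing paths: a CPU balancing an SD_SHARE_CPUPOWER (SMT) domain starts out assuming all of its siblings are idle, find_busiest_group() clears the flag as soon as it scans a busy sibling, and load_balance()/load_balance_newidle() return -1 when they moved nothing but a sibling turned out to be busy, so that rebalance_tick() stops treating the CPU as idle. The stand-alone C sketch below illustrates that control flow under simplified assumptions; it is not kernel code, and the toy runqueue, the find_busiest_cpu() helper, and all constants are illustrative only.

/*
 * Minimal sketch of the sd_idle idea (illustrative, not kernel code):
 * a balancer for a domain whose CPUs share execution resources starts
 * by assuming the whole domain is idle, clears that assumption when it
 * sees a busy sibling, and returns -1 when nothing was moved but a
 * sibling is busy, telling the caller to stop balancing at idle rate.
 */
#include <stdio.h>

#define SD_SHARE_CPUPOWER 0x1
#define NR_CPUS 2

static int cpu_nr_running[NR_CPUS] = { 0, 3 };	/* sibling 1 is busy */

static int idle_cpu(int cpu)
{
	return cpu_nr_running[cpu] == 0;
}

/* Scan the domain; clear *sd_idle if any sibling is running tasks. */
static int find_busiest_cpu(int this_cpu, int *sd_idle)
{
	int i, busiest = -1, max_running = 0;

	for (i = 0; i < NR_CPUS; i++) {
		if (*sd_idle && !idle_cpu(i))
			*sd_idle = 0;
		if (i != this_cpu && cpu_nr_running[i] > max_running) {
			max_running = cpu_nr_running[i];
			busiest = i;
		}
	}
	return busiest;
}

/*
 * Returns >0 if tasks were moved, 0 if balanced, and -1 if nothing was
 * moved but an SMT sibling is busy, so the caller must not keep
 * rebalancing this CPU at idle frequency.
 */
static int load_balance(int this_cpu, int flags)
{
	int sd_idle = 0, nr_moved = 0, busiest;

	if (flags & SD_SHARE_CPUPOWER)
		sd_idle = 1;

	busiest = find_busiest_cpu(this_cpu, &sd_idle);
	if (busiest >= 0 && cpu_nr_running[busiest] > 1) {
		cpu_nr_running[busiest]--;	/* "move" one task over */
		cpu_nr_running[this_cpu]++;
		nr_moved = 1;
	}

	if (!nr_moved && !sd_idle && (flags & SD_SHARE_CPUPOWER))
		return -1;
	return nr_moved;
}

int main(void)
{
	int ret = load_balance(0, SD_SHARE_CPUPOWER);

	/* Mirrors the rebalance_tick() check: any nonzero return,
	 * including -1, means "no longer idle". */
	if (ret)
		printf("no longer idle (ret=%d): pulled a task or a sibling is busy\n", ret);
	else
		printf("still idle\n");
	return 0;
}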
