Commit ba171f7

---
yaml
---
r: 8352
b: refs/heads/master
c: e17224b
h: refs/heads/master
v: v3
Nick Piggin authored and Linus Torvalds committed Sep 10, 2005
1 parent 684e7c4 commit ba171f7
Showing 2 changed files with 3 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d6d5cfaf4551aa7713ca6ab73bb77e832602204b
+refs/heads/master: e17224bf1d01b461ec02a60f5a9b7657a89bdd23
9 changes: 2 additions & 7 deletions trunk/kernel/sched.c
@@ -2075,7 +2075,6 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	int nr_moved, all_pinned = 0;
 	int active_balance = 0;
 
-	spin_lock(&this_rq->lock);
 	schedstat_inc(sd, lb_cnt[idle]);
 
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle);
@@ -2102,18 +2101,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		 * still unbalanced. nr_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
-		double_lock_balance(this_rq, busiest);
+		double_rq_lock(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, idle, &all_pinned);
-		spin_unlock(&busiest->lock);
+		double_rq_unlock(this_rq, busiest);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned))
 			goto out_balanced;
 	}
 
-	spin_unlock(&this_rq->lock);
-
 	if (!nr_moved) {
 		schedstat_inc(sd, lb_failed[idle]);
 		sd->nr_balance_failed++;
@@ -2156,8 +2153,6 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 	return nr_moved;
 
 out_balanced:
-	spin_unlock(&this_rq->lock);
-
 	schedstat_inc(sd, lb_balanced[idle]);
 
 	sd->nr_balance_failed = 0;
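The substance of the change is in the second hunk: instead of taking this_rq->lock for the whole of load_balance() and then picking up the busiest queue's lock with double_lock_balance(), the new code holds both runqueue locks only around move_tasks(), via double_rq_lock()/double_rq_unlock(). For readers unfamiliar with those helpers, the sketch below illustrates the usual address-ordered double-locking idea; it is a simplified illustration assuming <linux/spinlock.h> and a cut-down runqueue_t, not a copy of the kernel's own definitions.

#include <linux/spinlock.h>

/* Simplified stand-in for the scheduler's per-CPU runqueue (illustration only). */
typedef struct runqueue {
	spinlock_t lock;
	/* ... task lists, counters, etc. ... */
} runqueue_t;

/*
 * Sketch: lock two runqueues without deadlocking. Always acquire the
 * lock at the lower address first, so two CPUs balancing against each
 * other agree on the ordering. Because both locks are taken here, the
 * caller no longer needs to enter with this_rq->lock already held.
 */
static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
{
	if (rq1 == rq2) {
		spin_lock(&rq1->lock);
	} else if (rq1 < rq2) {
		spin_lock(&rq1->lock);
		spin_lock(&rq2->lock);
	} else {
		spin_lock(&rq2->lock);
		spin_lock(&rq1->lock);
	}
}

static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
{
	spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		spin_unlock(&rq2->lock);
}

Used this way, the balancing pass (find_busiest_group() and the schedstat updates) runs without this_rq->lock held, which is why the surrounding spin_lock()/spin_unlock() calls and the unlock in the out_balanced path can all be dropped.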
