From 6f115b40aff3c0539f46abce4e64fb6f28cc0dd6 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 22 Sep 2011 15:30:18 +0200
Subject: [PATCH]

--- yaml ---
r: 277407
b: refs/heads/master
c: a195f004e9496b4d99f471bb96e0a0c1af080909
h: refs/heads/master
i:
  277405: 1051b883a5e4895bf30dab7c887404a2f7824745
  277403: f95d7a9b766299dae93e7b13217d20087a92ea00
  277399: 5d45750deeb3bc3a4a608b732543a54712af03d6
  277391: f0cd61cae641e5462acfe6441a4c79efbba0c69d
  277375: b37a1d9e32de8fccb3d021c4ac83f9c53b596259
v: v3
---
 [refs]                    |  2 +-
 trunk/kernel/sched/fair.c | 32 +++++++++++++++++++++++++-------
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/[refs] b/[refs]
index 7f4e85500761..350df869fd9e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5b54b56be5b540a9cb12682c4d0df5454c098a38
+refs/heads/master: a195f004e9496b4d99f471bb96e0a0c1af080909
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index be47ce6da2a5..cea2fa853274 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -3132,6 +3132,8 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 }
 
 #define LBF_ALL_PINNED	0x01
+#define LBF_NEED_BREAK	0x02
+#define LBF_ABORT	0x04
 
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
@@ -3237,8 +3239,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		goto out;
 
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
-		if (loops++ > sysctl_sched_nr_migrate)
+		if (loops++ > sysctl_sched_nr_migrate) {
+			*lb_flags |= LBF_NEED_BREAK;
 			break;
+		}
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
 		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
@@ -3255,8 +3259,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 * kernels will stop after the first task is pulled to minimize
 		 * the critical section.
 		 */
-		if (idle == CPU_NEWLY_IDLE)
+		if (idle == CPU_NEWLY_IDLE) {
+			*lb_flags |= LBF_ABORT;
 			break;
+		}
 #endif
 
 		/*
@@ -3374,6 +3380,9 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
 		u64 rem_load, moved_load;
 
+		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+			break;
+
 		/*
 		 * empty group or part of a throttled hierarchy
 		 */
@@ -3440,18 +3449,19 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 		total_load_moved += load_moved;
 
+		if (*lb_flags & (LBF_NEED_BREAK|LBF_ABORT))
+			break;
+
 #ifdef CONFIG_PREEMPT
 		/*
 		 * NEWIDLE balancing is a source of latency, so preemptible
 		 * kernels will stop after the first task is pulled to minimize
 		 * the critical section.
 		 */
-		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
-			break;
-
-		if (raw_spin_is_contended(&this_rq->lock) ||
-		    raw_spin_is_contended(&busiest->lock))
+		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) {
+			*lb_flags |= LBF_ABORT;
 			break;
+		}
 #endif
 	} while (load_moved && max_load_move > total_load_moved);
 
@@ -4496,6 +4506,14 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	if (ld_moved && this_cpu != smp_processor_id())
 		resched_cpu(this_cpu);
 
+	if (lb_flags & LBF_ABORT)
+		goto out_balanced;
+
+	if (lb_flags & LBF_NEED_BREAK) {
+		lb_flags &= ~LBF_NEED_BREAK;
+		goto redo;
+	}
+
 	/* All tasks on this runqueue were pinned by CPU affinity */
 	if (unlikely(lb_flags & LBF_ALL_PINNED)) {
 		cpumask_clear_cpu(cpu_of(busiest), cpus);
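
The control flow the patch introduces is a two-level protocol: balance_tasks()
reports *why* it stopped through the lb_flags word, and load_balance() either
retries (LBF_NEED_BREAK, after clearing the bit) or gives up (LBF_ABORT, via
out_balanced). The following stand-alone sketch shows the same protocol in
isolation; the flag names and values mirror the patch, but pull_tasks(),
balance(), and NR_MIGRATE are simplified stand-ins, not kernel code.

/* Stand-alone sketch of the NEED_BREAK/ABORT protocol from the patch.
 * Flag names/values mirror kernel/sched/fair.c; everything else is a
 * simplified stand-in.
 */
#include <stdio.h>

#define LBF_ALL_PINNED	0x01
#define LBF_NEED_BREAK	0x02	/* stop to break the lock; caller may retry */
#define LBF_ABORT	0x04	/* stop for good; caller must not retry */

#define NR_MIGRATE	4	/* stand-in for sysctl_sched_nr_migrate */

/* Stand-in for balance_tasks(): pull at most a handful of tasks per pass,
 * then ask the caller to break out rather than hold the locks too long. */
static int pull_tasks(int *queued, int *lb_flags)
{
	int loops = 0, pulled = 0;

	while (*queued > 0) {
		if (loops++ > NR_MIGRATE) {
			*lb_flags |= LBF_NEED_BREAK;
			break;
		}
		(*queued)--;
		pulled++;
	}
	return pulled;
}

/* Stand-in for load_balance(): consume the flags the way the patch does. */
static int balance(int queued)
{
	int lb_flags = 0, moved = 0;

redo:
	moved += pull_tasks(&queued, &lb_flags);

	if (lb_flags & LBF_ABORT)
		goto out;

	if (lb_flags & LBF_NEED_BREAK) {
		lb_flags &= ~LBF_NEED_BREAK;	/* one-shot: clear, then retry */
		goto redo;
	}
out:
	return moved;
}

int main(void)
{
	printf("moved %d tasks\n", balance(10));	/* prints: moved 10 tasks */
	return 0;
}

Note the asymmetry the sketch preserves: LBF_NEED_BREAK is cleared before the
retry so one lock break cannot loop forever, while LBF_ABORT is never cleared
and terminates the whole balancing attempt.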