Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 323553
b: refs/heads/master
c: 5d18023
h: refs/heads/master
i:
  323551: 0b6670a
v: v3
  • Loading branch information
Peter Zijlstra authored and Paul E. McKenney committed Sep 23, 2012
1 parent 6777e52 commit 57cb99d
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 22 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 0d8ee37e2fcb7b77b9c5dee784beca5a215cad4c
refs/heads/master: 5d18023294abc22984886bd7185344e0c2be0daf
41 changes: 20 additions & 21 deletions trunk/kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -5304,27 +5304,17 @@ void idle_task_exit(void)
}

/*
 * While a dead CPU has no uninterruptible tasks queued at this point,
 * it might still have a nonzero ->nr_uninterruptible counter, because
 * for performance reasons the counter is not strictly tracking tasks to
 * their home CPUs. So we just add the counter to another CPU's counter,
 * to keep the global sum constant after CPU-down:
 */
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
	/* Any still-active CPU will do as the recipient of the leftover count. */
	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));

	/* Move the residue, then zero the source so the global sum is unchanged. */
	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
	rq_src->nr_uninterruptible = 0;
}

/*
* remove the tasks which were accounted by rq from calc_load_tasks.
* Since this CPU is going 'away' for a while, fold any nr_active delta
* we might have. Assumes we're called after migrate_tasks() so that the
* nr_active count is stable.
*
* Also see the comment "Global load-average calculations".
*/
static void calc_global_load_remove(struct rq *rq)
static void calc_load_migrate(struct rq *rq)
{
atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
rq->calc_load_active = 0;
long delta = calc_load_fold_active(rq);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
}

/*
Expand Down Expand Up @@ -5617,9 +5607,18 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
migrate_tasks(cpu);
BUG_ON(rq->nr_running != 1); /* the migration thread */
raw_spin_unlock_irqrestore(&rq->lock, flags);
break;

migrate_nr_uninterruptible(rq);
calc_global_load_remove(rq);
case CPU_DEAD:
{
struct rq *dest_rq;

local_irq_save(flags);
dest_rq = cpu_rq(smp_processor_id());
raw_spin_lock(&dest_rq->lock);
calc_load_migrate(rq);
raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
}
break;
#endif
}
Expand Down

0 comments on commit 57cb99d

Please sign in to comment.