Commit 8958273
---
yaml
---
r: 223987
b: refs/heads/master
c: 9e3081c
h: refs/heads/master
i:
  223985: 73ed387
  223983: 4c2204d
v: v3
Peter Zijlstra authored and Ingo Molnar committed Nov 18, 2010
1 parent afe813a commit 8958273
Showing 3 changed files with 59 additions and 68 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3d4b47b4b040c9d77dd68104cfc1055d89a55afd
+refs/heads/master: 9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a
67 changes: 0 additions & 67 deletions trunk/kernel/sched.c
Original file line number Diff line number Diff line change
@@ -279,13 +279,6 @@ static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-	return list_empty(&root_task_group.children);
-}
-#endif
-
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
@@ -1546,48 +1539,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static void update_cfs_load(struct cfs_rq *cfs_rq, int lb);
-static void update_cfs_shares(struct cfs_rq *cfs_rq);
-
-/*
- * update tg->load_weight by folding this cpu's load_avg
- */
-static int tg_shares_up(struct task_group *tg, void *data)
-{
-	long load_avg;
-	struct cfs_rq *cfs_rq;
-	unsigned long flags;
-	int cpu = (long)data;
-	struct rq *rq;
-
-	if (!tg->se[cpu])
-		return 0;
-
-	rq = cpu_rq(cpu);
-	cfs_rq = tg->cfs_rq[cpu];
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-
-	update_rq_clock(rq);
-	update_cfs_load(cfs_rq, 1);
-
-	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-	load_avg -= cfs_rq->load_contribution;
-
-	atomic_add(load_avg, &tg->load_weight);
-	cfs_rq->load_contribution += load_avg;
-
-	/*
-	 * We need to update shares after updating tg->load_weight in
-	 * order to adjust the weight of groups with long running tasks.
-	 */
-	update_cfs_shares(cfs_rq);
-
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return 0;
-}
-
 /*
  * Compute the cpu's hierarchical load factor for each task group.
  * This needs to be done in a top-down fashion because the load of a child
@@ -1611,29 +1562,11 @@ static int tg_load_down(struct task_group *tg, void *data)
 	return 0;
 }
 
-static void update_shares(long cpu)
-{
-	if (root_task_group_empty())
-		return;
-
-	/*
-	 * XXX: replace with an on-demand list
-	 */
-
-	walk_tg_tree(tg_nop, tg_shares_up, (void *)cpu);
-}
-
 static void update_h_load(long cpu)
 {
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
-#else
-
-static inline void update_shares(int cpu)
-{
-}
-
 #endif
 
 #ifdef CONFIG_PREEMPT
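Note: the tg_shares_up() removed above (and re-added in kernel/sched_fair.c below) keeps tg->load_weight in sync by remembering, in cfs_rq->load_contribution, how much this cpu has already folded in, and then applying only the difference on each update. A minimal userspace sketch of that bookkeeping, using made-up toy types and values rather than the kernel's structures:

/*
 * Toy model of the load_contribution bookkeeping -- NOT kernel code.
 * Each per-cpu runqueue remembers how much it already folded into the group
 * weight (load_contribution), so every update only applies the delta.
 */
#include <stdio.h>

struct toy_cfs_rq {
	long load_avg;			/* accumulated load samples */
	long load_period;		/* accumulated sample time */
	long load_contribution;		/* already folded into the group */
};

struct toy_task_group {
	long load_weight;		/* sum of all cpus' contributions */
};

static void toy_shares_up(struct toy_task_group *tg, struct toy_cfs_rq *cfs_rq)
{
	/* same shape as the patch: average the load, then fold only the change */
	long load_avg = cfs_rq->load_avg / (cfs_rq->load_period + 1);
	long delta = load_avg - cfs_rq->load_contribution;

	tg->load_weight += delta;	/* atomic_add() in the kernel */
	cfs_rq->load_contribution += delta;
}

int main(void)
{
	struct toy_task_group tg = { 0 };
	struct toy_cfs_rq cpu0 = { .load_avg = 2048, .load_period = 1 };
	struct toy_cfs_rq cpu1 = { .load_avg = 1024, .load_period = 1 };

	toy_shares_up(&tg, &cpu0);	/* folds 1024 */
	toy_shares_up(&tg, &cpu1);	/* folds  512 -> load_weight = 1536 */

	cpu0.load_avg = 1024;		/* cpu0's load halves ... */
	toy_shares_up(&tg, &cpu0);	/* ... so only -512 is folded back */

	printf("tg.load_weight = %ld\n", tg.load_weight);	/* prints 1024 */
	return 0;
}

Running the sketch prints tg.load_weight = 1024: the second update for cpu0 folds back only the 512 by which its average load dropped.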
58 changes: 58 additions & 0 deletions trunk/kernel/sched_fair.c
@@ -2004,6 +2004,60 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * update tg->load_weight by folding this cpu's load_avg
+ */
+static int tg_shares_up(struct task_group *tg, int cpu)
+{
+	struct cfs_rq *cfs_rq;
+	unsigned long flags;
+	struct rq *rq;
+	long load_avg;
+
+	if (!tg->se[cpu])
+		return 0;
+
+	rq = cpu_rq(cpu);
+	cfs_rq = tg->cfs_rq[cpu];
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+
+	update_rq_clock(rq);
+	update_cfs_load(cfs_rq, 1);
+
+	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+	load_avg -= cfs_rq->load_contribution;
+	atomic_add(load_avg, &tg->load_weight);
+	cfs_rq->load_contribution += load_avg;
+
+	/*
+	 * We need to update shares after updating tg->load_weight in
+	 * order to adjust the weight of groups with long running tasks.
+	 */
+	update_cfs_shares(cfs_rq);
+
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return 0;
+}
+
+static void update_shares(int cpu)
+{
+	struct cfs_rq *cfs_rq;
+	struct rq *rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct task_group *tg = cfs_rq->tg;
+
+		do {
+			tg_shares_up(tg, cpu);
+			tg = tg->parent;
+		} while (tg);
+	}
+	rcu_read_unlock();
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
@@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return max_load_move - rem_load_move;
 }
 #else
+static inline void update_shares(int cpu)
+{
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
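Note: where the removed sched.c update_shares() walked every task group in the system through walk_tg_tree(), the version added above touches only the groups that currently have a leaf cfs_rq on this runqueue, following each group's ->parent chain up to the root. A rough userspace illustration of that traversal order, using hypothetical toy structures rather than the kernel's:

/*
 * Toy model of the traversal order in the new update_shares() -- NOT kernel
 * code. Only groups with a runnable ("leaf") cfs_rq on this cpu are visited,
 * each followed by its ancestors up to the root; idle groups are skipped.
 */
#include <stdio.h>

struct toy_task_group {
	const char *name;
	struct toy_task_group *parent;
};

static void toy_tg_shares_up(struct toy_task_group *tg, int cpu)
{
	printf("cpu%d: update shares of '%s'\n", cpu, tg->name);
}

/* leaves[] stands in for for_each_leaf_cfs_rq() on this cpu's runqueue */
static void toy_update_shares(struct toy_task_group **leaves, int nr, int cpu)
{
	for (int i = 0; i < nr; i++) {
		struct toy_task_group *tg = leaves[i];

		do {
			toy_tg_shares_up(tg, cpu);
			tg = tg->parent;
		} while (tg);
	}
}

int main(void)
{
	struct toy_task_group root = { "root", NULL };
	struct toy_task_group a    = { "a",    &root };
	struct toy_task_group a1   = { "a/1",  &a };
	struct toy_task_group b    = { "b",    &root };

	/* only a/1 has runnable tasks on cpu 0; group b is never visited */
	struct toy_task_group *leaves[] = { &a1 };

	toy_update_shares(leaves, 1, 0);
	(void)b;
	return 0;
}

With only group a/1 runnable on cpu 0, the walk visits a/1, then a, then root; group b, which has nothing queued on that cpu, is never visited.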
