---
yaml
---
r: 110723
b: refs/heads/master
c: eb75580
h: refs/heads/master
i:
  110721: 2351829
  110719: aee1d86
v: v3
Peter Zijlstra authored and Ingo Molnar committed Aug 19, 2008
1 parent df45038 commit e8e659c
Showing 2 changed files with 47 additions and 34 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 0b148fa04852859972abbf848177b92daeef138a
refs/heads/master: eb755805f21bd5ded84026e167b7a90887ac42e5
79 changes: 46 additions & 33 deletions trunk/kernel/sched.c
@@ -1387,53 +1387,67 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
update_load_sub(&rq->load, load);
}

#ifdef CONFIG_SMP
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);

static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);

if (rq->nr_running)
rq->avg_load_per_task = rq->load.weight / rq->nr_running;

return rq->avg_load_per_task;
}

#ifdef CONFIG_FAIR_GROUP_SCHED

typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED))
typedef int (*tg_visitor)(struct task_group *, void *);

/*
* Iterate the full tree, calling @down when first entering a node and @up when
* leaving it for the final time.
*/
static void
walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
struct task_group *parent, *child;
int ret;

rcu_read_lock();
parent = &root_task_group;
down:
(*down)(parent, cpu, sd);
ret = (*down)(parent, data);
if (ret)
goto out_unlock;
list_for_each_entry_rcu(child, &parent->children, siblings) {
parent = child;
goto down;

up:
continue;
}
(*up)(parent, cpu, sd);
ret = (*up)(parent, data);
if (ret)
goto out_unlock;

child = parent;
parent = parent->parent;
if (parent)
goto up;
out_unlock:
rcu_read_unlock();

return ret;
}

static int tg_nop(struct task_group *tg, void *data)
{
return 0;
}
#endif
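
The rework above changes the visitor type from void (*)(struct task_group *, int, struct sched_domain *) to int (*)(struct task_group *, void *): callers now pass arbitrary context through data, and a non-zero return from either visitor aborts the walk. The kernel version avoids recursion with gotos; the following minimal user-space sketch of the same down/up pattern uses plain recursion for brevity, and struct node, walk(), print_down() and nop() are illustrative names, not kernel code.

#include <stdio.h>

/* Hypothetical tree node; stands in for struct task_group. */
struct node {
        const char *name;
        struct node *child;     /* first child */
        struct node *sibling;   /* next sibling */
};

typedef int (*visitor)(struct node *, void *);

/* Call @down when entering a node, @up when leaving it, and stop the
 * whole walk as soon as either visitor returns non-zero. */
static int walk(struct node *n, visitor down, visitor up, void *data)
{
        int ret = down(n, data);
        if (ret)
                return ret;
        for (struct node *c = n->child; c; c = c->sibling) {
                ret = walk(c, down, up, data);
                if (ret)
                        return ret;
        }
        return up(n, data);
}

static int print_down(struct node *n, void *data)
{
        (void)data;
        printf("enter %s\n", n->name);
        return 0;
}

static int nop(struct node *n, void *data)
{
        (void)n;
        (void)data;
        return 0;       /* like tg_nop(): do nothing, never abort */
}

int main(void)
{
        struct node b = { "child-b", NULL, NULL };
        struct node a = { "child-a", NULL, &b };
        struct node root = { "root", &a, NULL };

        walk(&root, print_down, nop, NULL);
        return 0;
}

Returning non-zero from a visitor is what walk_tg_tree() propagates through ret, which is the early-exit capability the old void-returning interface could not express.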

#ifdef CONFIG_SMP
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);

static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);

if (rq->nr_running)
rq->avg_load_per_task = rq->load.weight / rq->nr_running;

return rq->avg_load_per_task;
}

#ifdef CONFIG_FAIR_GROUP_SCHED

static void __set_se_shares(struct sched_entity *se, unsigned long shares);

@@ -1493,11 +1507,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
* This needs to be done in a bottom-up fashion because the rq weight of a
* parent group depends on the shares of its child groups.
*/
static void
tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
static int tg_shares_up(struct task_group *tg, void *data)
{
unsigned long rq_weight = 0;
unsigned long shares = 0;
struct sched_domain *sd = data;
int i;

for_each_cpu_mask(i, sd->span) {
@@ -1522,17 +1536,19 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
__update_group_shares_cpu(tg, i, shares, rq_weight);
spin_unlock_irqrestore(&rq->lock, flags);
}

return 0;
}
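
As the comment above says, this pass must run bottom-up because a parent's runqueue weight depends on its children's shares. The net effect of tg_shares_up() is, roughly, to hand each cpu in the domain a slice of the group's shares proportional to the runqueue weight the group's tasks have on that cpu. The sketch below is an assumed simplification of that arithmetic; cpu_share() is an illustrative name, not the kernel's __update_group_shares_cpu(), which also takes the rq lock and clamps the result.

#include <stdio.h>

/* Illustration only: split a group's shares across CPUs in proportion to
 * the runqueue weight its tasks contribute on each CPU. */
static unsigned long cpu_share(unsigned long group_shares,
                               unsigned long cpu_rq_weight,
                               unsigned long total_rq_weight)
{
        if (!total_rq_weight)
                return 0;
        return group_shares * cpu_rq_weight / total_rq_weight;
}

int main(void)
{
        /* group has 1024 shares; its tasks weigh 2048 on CPU0, 1024 on CPU1 */
        unsigned long total = 2048 + 1024;

        printf("cpu0: %lu\n", cpu_share(1024, 2048, total)); /* 682 */
        printf("cpu1: %lu\n", cpu_share(1024, 1024, total)); /* 341 */
        return 0;
}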

/*
* Compute the cpu's hierarchical load factor for each task group.
* This needs to be done in a top-down fashion because the load of a child
* group is a fraction of its parents load.
*/
static void
tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
static int tg_load_down(struct task_group *tg, void *data)
{
unsigned long load;
long cpu = (long)data;

if (!tg->parent) {
load = cpu_rq(cpu)->load.weight;
@@ -1543,11 +1559,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
}

tg->cfs_rq[cpu]->h_load = load;
}

static void
tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
{
return 0;
}
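
The h_load computation goes the other way: it is top-down because a child's load is a fraction of its parent's. The root group's h_load is simply the cpu's runqueue weight, and each level down inherits a share of its parent's h_load proportional to the weight the child holds in the parent. The sketch below is an assumed simplification; child_h_load() is an illustrative helper, not the kernel's exact fixed-point code.

#include <stdio.h>

/* Illustration only: scale the parent's hierarchical load by the fraction
 * of the parent's runqueue weight that this child accounts for. */
static unsigned long child_h_load(unsigned long parent_h_load,
                                  unsigned long child_weight_in_parent,
                                  unsigned long parent_rq_weight)
{
        if (!parent_rq_weight)
                return 0;
        return parent_h_load * child_weight_in_parent / parent_rq_weight;
}

int main(void)
{
        unsigned long root_h_load = 3072;       /* root: the cpu's rq weight */

        /* child contributes 1024 of the parent's 2048 weight -> half the load */
        printf("child h_load: %lu\n", child_h_load(root_h_load, 1024, 2048));
        return 0;
}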

static void update_shares(struct sched_domain *sd)
@@ -1557,7 +1570,7 @@ static void update_shares(struct sched_domain *sd)

if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
sd->last_update = now;
walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
walk_tg_tree(tg_nop, tg_shares_up, sd);
}
}

@@ -1568,9 +1581,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
spin_lock(&rq->lock);
}

static void update_h_load(int cpu)
static void update_h_load(long cpu)
{
walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}
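
update_h_load() also shows the cost of the generic void *data interface: a plain cpu number is cast into the pointer argument here and recovered on the other side, exactly as tg_load_down() does with long cpu = (long)data. A stand-alone illustration of that idiom (print_cpu() is a made-up callback, not kernel code):

#include <stdio.h>

/* Callback receiving a scalar smuggled through void *. */
static int print_cpu(void *data)
{
        long cpu = (long)data;  /* recover the scalar from the pointer */

        printf("visiting cpu %ld\n", cpu);
        return 0;
}

int main(void)
{
        long cpu = 3;

        /* Pack the scalar into the pointer argument; this works because
         * sizeof(long) == sizeof(void *) on the platforms this targets. */
        print_cpu((void *)cpu);
        return 0;
}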

#else
