Commit afe813a

---
r: 223986
b: refs/heads/master
c: 3d4b47b
h: refs/heads/master
v: v3
Peter Zijlstra authored and Ingo Molnar committed Nov 18, 2010
1 parent 73ed387 commit afe813a
Showing 4 changed files with 93 additions and 84 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2069dd75c7d0f49355939e5586daf5a9ab216db7
+refs/heads/master: 3d4b47b4b040c9d77dd68104cfc1055d89a55afd
105 changes: 28 additions & 77 deletions trunk/kernel/sched.c
@@ -274,9 +274,7 @@ struct task_group {
 
 #define root_task_group init_task_group
 
-/* task_group_lock serializes add/remove of task groups and also changes to
- * a task group's cpu shares.
- */
+/* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -344,6 +342,7 @@ struct cfs_rq {
 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 	 * list is used during load balance.
 	 */
+	int on_list;
 	struct list_head leaf_cfs_rq_list;
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
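The new on_list field is the hinge of this patch: it records whether the cfs_rq is currently linked into its runqueue's leaf_cfs_rq_list, which lets the add/remove helpers introduced in sched_fair.c further down be called unconditionally and stay idempotent. A minimal userspace sketch of that flag-guarded pattern (all names are illustrative stand-ins, not kernel API):

/* toy_leaf.c - flag-guarded, idempotent list membership (illustrative) */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct toy_cfs_rq {
	int on_list;                /* mirrors cfs_rq->on_list */
	struct list_head leaf;      /* mirrors cfs_rq->leaf_cfs_rq_list */
};

static void add_leaf(struct toy_cfs_rq *q, struct list_head *list)
{
	if (!q->on_list) {          /* a second add is a no-op */
		list_add(&q->leaf, list);
		q->on_list = 1;
	}
}

static void del_leaf(struct toy_cfs_rq *q)
{
	if (q->on_list) {           /* a second del is a no-op */
		list_del(&q->leaf);
		q->on_list = 0;
	}
}

int main(void)
{
	struct list_head leaf_list = { &leaf_list, &leaf_list };
	struct toy_cfs_rq q = { 0, { 0, 0 } };

	add_leaf(&q, &leaf_list); add_leaf(&q, &leaf_list);
	printf("on_list=%d\n", q.on_list);  /* 1: linked exactly once */
	del_leaf(&q); del_leaf(&q);
	printf("on_list=%d\n", q.on_list);  /* 0: unlinked exactly once */
	return 0;
}

The kernel helpers below differ mainly in using list_add_rcu()/list_del_rcu(), so lockless readers can keep walking the list while membership changes.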
@@ -1547,7 +1546,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_load(struct cfs_rq *cfs_rq, int lb);
 static void update_cfs_shares(struct cfs_rq *cfs_rq);
 
 /*
@@ -1570,7 +1569,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	update_rq_clock(rq);
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 1);
 
 	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
 	load_avg -= cfs_rq->load_contribution;
@@ -7688,15 +7687,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
-				struct sched_entity *se, int cpu, int add,
+				struct sched_entity *se, int cpu,
 				struct sched_entity *parent)
 {
 	struct rq *rq = cpu_rq(cpu);
 	tg->cfs_rq[cpu] = cfs_rq;
 	init_cfs_rq(cfs_rq, rq);
 	cfs_rq->tg = tg;
-	if (add)
-		list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 
 	tg->se[cpu] = se;
 	/* se could be NULL for init_task_group */
@@ -7716,7 +7713,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
-		struct sched_rt_entity *rt_se, int cpu, int add,
+		struct sched_rt_entity *rt_se, int cpu,
 		struct sched_rt_entity *parent)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7725,8 +7722,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	init_rt_rq(rt_rq, rq);
 	rt_rq->tg = tg;
 	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
-	if (add)
-		list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
 
 	tg->rt_se[cpu] = rt_se;
 	if (!rt_se)
@@ -7835,15 +7830,15 @@ void __init sched_init(void)
 		 * We achieve this by letting init_task_group's tasks sit
 		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
+		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
 #endif
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
 #ifdef CONFIG_CGROUP_SCHED
-		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
+		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
 #endif
 #endif
 
@@ -8119,7 +8114,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!se)
 			goto err_free_rq;
 
-		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 	}
 
 	return 1;
@@ -8130,15 +8125,21 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	return 0;
 }
 
-static inline void register_fair_sched_group(struct task_group *tg, int cpu)
-{
-	list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
-			&cpu_rq(cpu)->leaf_cfs_rq_list);
-}
-
 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
-	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/*
+	 * Only empty task groups can be destroyed; so we can speculatively
+	 * check on_list without danger of it being re-added.
+	 */
+	if (!tg->cfs_rq[cpu]->on_list)
+		return;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 #else /* !CONFG_FAIR_GROUP_SCHED */
 static inline void free_fair_sched_group(struct task_group *tg)
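Two things stand out in the new unregister_fair_sched_group(): the unlocked on_list test is safe only because, as the comment says, a task group must already be empty before it can be destroyed, so nothing can re-add it concurrently; and rq->lock is taken only when an unlink is actually needed. A compact, runnable sketch of that check-then-lock fast path (pthread stand-ins, not kernel API):

/* toy_unregister.c - speculative check, lock only when work remains */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static int on_list;                     /* tg->cfs_rq[cpu]->on_list stand-in */

static void unregister_group(void)
{
	if (!on_list)                   /* unlocked, speculative check */
		return;                 /* common case: group never ran here */

	pthread_mutex_lock(&rq_lock);   /* serialize against enqueue paths */
	on_list = 0;                    /* stands in for list_del_leaf_cfs_rq() */
	pthread_mutex_unlock(&rq_lock);
}

int main(void)
{
	unregister_group();             /* no-op, lock never taken */
	on_list = 1;
	unregister_group();             /* takes the lock and unlinks */
	printf("on_list=%d\n", on_list);
	return 0;
}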
@@ -8151,10 +8153,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	return 1;
 }
 
-static inline void register_fair_sched_group(struct task_group *tg, int cpu)
-{
-}
-
 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
 }
@@ -8209,7 +8207,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!rt_se)
 			goto err_free_rq;
 
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 	}
 
 	return 1;
@@ -8219,17 +8217,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 err:
 	return 0;
 }
-
-static inline void register_rt_sched_group(struct task_group *tg, int cpu)
-{
-	list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
-			&cpu_rq(cpu)->leaf_rt_rq_list);
-}
-
-static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
-{
-	list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
-}
 #else /* !CONFIG_RT_GROUP_SCHED */
 static inline void free_rt_sched_group(struct task_group *tg)
 {
@@ -8240,14 +8227,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	return 1;
 }
-
-static inline void register_rt_sched_group(struct task_group *tg, int cpu)
-{
-}
-
-static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
-{
-}
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
@@ -8263,7 +8242,6 @@ struct task_group *sched_create_group(struct task_group *parent)
 {
 	struct task_group *tg;
 	unsigned long flags;
-	int i;
 
 	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
 	if (!tg)
@@ -8276,10 +8254,6 @@ struct task_group *sched_create_group(struct task_group *parent)
 		goto err;
 
 	spin_lock_irqsave(&task_group_lock, flags);
-	for_each_possible_cpu(i) {
-		register_fair_sched_group(tg, i);
-		register_rt_sched_group(tg, i);
-	}
 	list_add_rcu(&tg->list, &task_groups);
 
 	WARN_ON(!parent);	/* root should already exist */
@@ -8309,11 +8283,11 @@ void sched_destroy_group(struct task_group *tg)
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&task_group_lock, flags);
-	for_each_possible_cpu(i) {
+	/* end participation in shares distribution */
+	for_each_possible_cpu(i)
 		unregister_fair_sched_group(tg, i);
-		unregister_rt_sched_group(tg, i);
-	}
+
+	spin_lock_irqsave(&task_group_lock, flags);
 	list_del_rcu(&tg->list);
 	list_del_rcu(&tg->siblings);
 	spin_unlock_irqrestore(&task_group_lock, flags);
@@ -8391,7 +8365,6 @@ static DEFINE_MUTEX(shares_mutex);
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
-	unsigned long flags;
 
 	/*
 	 * We can't change the weight of the root cgroup.
@@ -8408,19 +8381,6 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	if (tg->shares == shares)
 		goto done;
 
-	spin_lock_irqsave(&task_group_lock, flags);
-	for_each_possible_cpu(i)
-		unregister_fair_sched_group(tg, i);
-	list_del_rcu(&tg->siblings);
-	spin_unlock_irqrestore(&task_group_lock, flags);
-
-	/* wait for any ongoing reference to this group to finish */
-	synchronize_sched();
-
-	/*
-	 * Now we are free to modify the group's share on each cpu
-	 * w/o tripping rebalance_share or load_balance_fair.
-	 */
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
 		/*
@@ -8429,15 +8389,6 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		set_se_shares(tg->se[i], shares);
 	}
 
-	/*
-	 * Enable load balance activity on this group, by inserting it back on
-	 * each cpu's rq->leaf_cfs_rq_list.
-	 */
-	spin_lock_irqsave(&task_group_lock, flags);
-	for_each_possible_cpu(i)
-		register_fair_sched_group(tg, i);
-	list_add_rcu(&tg->siblings, &tg->parent->children);
-	spin_unlock_irqrestore(&task_group_lock, flags);
 done:
 	mutex_unlock(&shares_mutex);
 	return 0;
46 changes: 40 additions & 6 deletions trunk/kernel/sched_fair.c
@@ -143,6 +143,24 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 	return cfs_rq->tg->cfs_rq[this_cpu];
 }
 
+static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+	if (!cfs_rq->on_list) {
+		list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+			&rq_of(cfs_rq)->leaf_cfs_rq_list);
+
+		cfs_rq->on_list = 1;
+	}
+}
+
+static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->on_list) {
+		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+		cfs_rq->on_list = 0;
+	}
+}
+
 /* Iterate thr' all leaf cfs_rq's on a runqueue */
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
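This is where the patch pays off: for_each_leaf_cfs_rq(), which the load-balance code uses, now only ever visits cfs_rqs that were registered because they had (or recently had) tasks, instead of every group ever created. A single-threaded toy of that sparse walk (the kernel traverses with list_for_each_entry_rcu() under RCU; the concurrency is omitted here):

/* toy_walk.c - walking only the registered (occupied) queues */
#include <stdio.h>

struct toy_cfs_rq {
	const char *name;
	int nr_running;
	struct toy_cfs_rq *next_leaf;   /* stand-in for leaf_cfs_rq_list */
};

int main(void)
{
	struct toy_cfs_rq busy = { "groupA", 3, 0 };
	struct toy_cfs_rq idle = { "groupB", 0, 0 };
	struct toy_cfs_rq *leaf_list = 0, *q;

	busy.next_leaf = leaf_list;     /* registered on first enqueue */
	leaf_list = &busy;
	(void)idle;                     /* never enqueued, never walked */

	for (q = leaf_list; q; q = q->next_leaf)  /* for_each_leaf_cfs_rq */
		printf("balance %s (nr_running=%d)\n", q->name, q->nr_running);
	return 0;
}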
@@ -246,6 +264,14 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 	return &cpu_rq(this_cpu)->cfs;
 }
 
+static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+}
+
+static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+}
+
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
@@ -648,7 +674,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 {
 	u64 period = sched_avg_period();
 	u64 now, delta;
@@ -673,6 +699,11 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
 		cfs_rq->load_period /= 2;
 		cfs_rq->load_avg /= 2;
 	}
+
+	if (lb && !cfs_rq->nr_running) {
+		if (cfs_rq->load_avg < (period / 8))
+			list_del_leaf_cfs_rq(cfs_rq);
+	}
 }
 
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
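The new lb argument gates the pruning: only the shares-update path (the update_cfs_load(cfs_rq, 1) call in tg_shares_up() earlier in this commit) may drop an empty cfs_rq from the leaf list, and only once its load average has decayed below period/8. A toy of the decay arithmetic; the period value is invented for illustration, since sched_avg_period()'s real value is not part of this diff:

/* toy_decay.c - how many halvings until an idle queue is pruned */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period = 1000000;      /* invented; not sched_avg_period() */
	uint64_t load_avg = period;     /* a busy queue that just went idle */
	int steps = 0;

	while (load_avg >= period / 8) {/* prune threshold from the hunk */
		load_avg /= 2;          /* mirrors cfs_rq->load_avg /= 2 */
		steps++;
	}
	printf("pruned after %d idle periods\n", steps);  /* prints 4 */
	return 0;
}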
@@ -719,7 +750,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq)
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq)
+static inline void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 {
 }
 
@@ -849,7 +880,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);
 
@@ -863,6 +894,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
+
+	if (cfs_rq->nr_running == 1)
+		list_add_leaf_cfs_rq(cfs_rq);
 }
 
 static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
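Registration is the mirror image of the pruning above: only the 0 -> 1 transition of nr_running (account_entity_enqueue() has already incremented it by this point) links the cfs_rq, so steady-state enqueues never touch the list. In toy form (stand-ins, not kernel code):

/* toy_enqueue.c - register only on the first task */
#include <stdio.h>

static int nr_running, on_list;

static void enqueue_entity(void)
{
	nr_running++;                   /* account_entity_enqueue() did this */
	if (nr_running == 1) {          /* 0 -> 1: first task on this cfs_rq */
		on_list = 1;            /* list_add_leaf_cfs_rq() stand-in */
		puts("cfs_rq registered");
	}
}

int main(void)
{
	enqueue_entity();               /* registers */
	enqueue_entity();               /* steady state: list untouched */
	printf("nr_running=%d on_list=%d\n", nr_running, on_list);
	return 0;
}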
@@ -907,7 +941,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq);
@@ -1142,7 +1176,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
@@ -1172,7 +1206,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 