
Commit 44ab4be

---
r: 76155
b: refs/heads/master
c: 0eab914
h: refs/heads/master
i:
  76153: 689c0b7
  76151: 69017cb
v: v3
Ingo Molnar committed Jan 25, 2008
1 parent 6788870 commit 44ab4be
Showing 2 changed files with 18 additions and 16 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d7876a08db50895ed9808ede4a259cccf65eba47
+refs/heads/master: 0eab9146571dfa9b50ea952ec2ab27d591f26b63
32 changes: 17 additions & 15 deletions trunk/kernel/sched.c
@@ -235,17 +235,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares);
  * Every task in system belong to this group at bootup.
  */
 struct task_group init_task_group = {
-	.se = init_sched_entity_p,
+	.se	= init_sched_entity_p,
 	.cfs_rq = init_cfs_rq_p,
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD	2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
-#define MIN_GROUP_SHARES	2
+#define MIN_GROUP_SHARES		2
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;

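The parenthesization of INIT_TASK_GROUP_LOAD is the one substantive change in this hunk: an unparenthesized expansion of 2*NICE_0_LOAD is exposed to the precedence of whatever expression the macro lands in. A minimal stand-alone sketch of the failure mode (the NICE_0_LOAD value and the division are illustrative, not from this commit):

#include <stdio.h>

#define NICE_0_LOAD	1024			/* illustrative value */
#define GROUP_LOAD_BAD	2*NICE_0_LOAD		/* unparenthesized, as before the fix */
#define GROUP_LOAD_GOOD	(2*NICE_0_LOAD)		/* parenthesized, as after the fix */

int main(void)
{
	/* 4096 / 2*1024 parses as (4096 / 2) * 1024 == 2097152 */
	printf("bad:  %d\n", 4096 / GROUP_LOAD_BAD);
	/* 4096 / (2*1024) == 2, as intended */
	printf("good: %d\n", 4096 / GROUP_LOAD_GOOD);
	return 0;
}
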
@@ -352,8 +352,8 @@ struct rt_rq {
 
 /*
  * We add the notion of a root-domain which will be used to define per-domain
- * variables.  Each exclusive cpuset essentially defines an island domain by
- * fully partitioning the member cpus from any other cpuset.  Whenever a new
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
  * exclusive cpuset is created, we also create and attach a new root-domain
  * object.
  *
@@ -365,12 +365,12 @@ struct root_domain {
 	cpumask_t span;
 	cpumask_t online;
 
-        /*
+	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
 	 */
 	cpumask_t rto_mask;
-	atomic_t rto_count;
+	atomic_t  rto_count;
 };
 
 static struct root_domain def_root_domain;
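
The comment cleaned up here documents the "RT overload" pairing: rto_mask records which CPUs currently have more than one runnable real-time task, and rto_count caches the number of set bits so hot paths can ask "is any CPU overloaded?" without scanning the mask. A rough user-space sketch of that pairing (names and types are stand-ins for cpumask_t/atomic_t, the mask update is left unsynchronized for brevity, and cpu indices are assumed to fit in an unsigned long):

#include <stdatomic.h>
#include <stdbool.h>

struct rd_sketch {
	unsigned long rto_mask;		/* stand-in for cpumask_t */
	atomic_int rto_count;		/* stand-in for atomic_t */
};

/* Mark 'cpu' as overloaded: more than one runnable RT task. */
static void rto_set(struct rd_sketch *rd, int cpu)
{
	if (!(rd->rto_mask & (1UL << cpu))) {
		rd->rto_mask |= 1UL << cpu;
		atomic_fetch_add(&rd->rto_count, 1);
	}
}

/* Cheap hot-path test: any overloaded CPU in this domain at all? */
static bool rto_any(struct rd_sketch *rd)
{
	return atomic_load(&rd->rto_count) > 0;
}
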
@@ -434,7 +434,7 @@ struct rq {
 	atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
-	struct root_domain *rd;
+	struct root_domain	*rd;
 	struct sched_domain *sd;
 
 	/* For active balancing */
@@ -5066,7 +5066,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, &new_mask);
 	else {
-		p->cpus_allowed = new_mask;
+		p->cpus_allowed    = new_mask;
 		p->nr_cpus_allowed = cpus_weight(new_mask);
 	}
 
@@ -5847,9 +5847,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	if (rq->rd) {
 		struct root_domain *old_rd = rq->rd;
 
-		for (class = sched_class_highest; class; class = class->next)
+		for (class = sched_class_highest; class; class = class->next) {
 			if (class->leave_domain)
 				class->leave_domain(rq);
+		}
 
 		if (atomic_dec_and_test(&old_rd->refcount))
 			kfree(old_rd);
@@ -5858,9 +5859,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	atomic_inc(&rd->refcount);
 	rq->rd = rd;
 
-	for (class = sched_class_highest; class; class = class->next)
+	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)
 			class->join_domain(rq);
+	}
 
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -5895,11 +5897,11 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
 }
 
 /*
- * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
-static void cpu_attach_domain(struct sched_domain *sd,
-		struct root_domain *rd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
@@ -7095,7 +7097,7 @@ static int rebalance_shares(struct sched_domain *sd, int this_cpu)
 		for_each_cpu_mask(i, sdspan)
 			total_load += tg->cfs_rq[i]->load.weight;
 
-                /* Nothing to do if this group has no load */
+		/* Nothing to do if this group has no load */
 		if (!total_load)
 			continue;
 
