Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 100190
b: refs/heads/master
c: 1f11eb6
h: refs/heads/master
v: v3
  • Loading branch information
Gregory Haskins authored and Ingo Molnar committed Jun 6, 2008
1 parent 57bd57a commit d78717c
Show file tree
Hide file tree
Showing 4 changed files with 61 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 099f98c8a1f13501a98afbfff4756395a610581c
refs/heads/master: 1f11eb6a8bc92536d9e93ead48fa3ffbd1478571
4 changes: 2 additions & 2 deletions trunk/include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -903,8 +903,8 @@ struct sched_class {
void (*set_cpus_allowed)(struct task_struct *p,
const cpumask_t *newmask);

void (*join_domain)(struct rq *rq);
void (*leave_domain)(struct rq *rq);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);

void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
Expand Down
54 changes: 40 additions & 14 deletions trunk/kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -529,6 +529,7 @@ struct rq {
int push_cpu;
/* cpu of this runqueue: */
int cpu;
int online;

struct task_struct *migration_thread;
struct list_head migration_queue;
Expand Down Expand Up @@ -1498,6 +1499,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
#endif

/* The head of the scheduling-class list; iteration starts here. */
#define sched_class_highest (&rt_sched_class)
/*
 * Walk every scheduling class, starting at sched_class_highest and
 * following the singly-linked ->next chain until the terminating NULL.
 */
#define for_each_class(class) \
for (class = sched_class_highest; class; class = class->next)

static inline void inc_load(struct rq *rq, const struct task_struct *p)
{
Expand Down Expand Up @@ -6065,6 +6068,36 @@ static void unregister_sched_domain_sysctl(void)
}
#endif

/*
 * Mark this runqueue online in its root domain and notify every
 * scheduling class that provides an rq_online callback.
 * Idempotent: does nothing if the runqueue is already online.
 */
static void set_rq_online(struct rq *rq)
{
	const struct sched_class *class;

	if (rq->online)
		return;

	/* Publish this CPU in the root domain's online mask first. */
	cpu_set(rq->cpu, rq->rd->online);
	rq->online = 1;

	for_each_class(class) {
		if (class->rq_online)
			class->rq_online(rq);
	}
}

/*
 * Take this runqueue offline: notify every scheduling class that
 * provides an rq_offline callback, then clear the CPU from the root
 * domain's online mask. Idempotent: does nothing if already offline.
 */
static void set_rq_offline(struct rq *rq)
{
	const struct sched_class *class;

	if (!rq->online)
		return;

	/* Classes are notified before the CPU leaves the online mask. */
	for_each_class(class) {
		if (class->rq_offline)
			class->rq_offline(rq);
	}

	cpu_clear(rq->cpu, rq->rd->online);
	rq->online = 0;
}

/*
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
Expand Down Expand Up @@ -6102,7 +6135,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpu_isset(cpu, rq->rd->span));
cpu_set(cpu, rq->rd->online);

set_rq_online(rq);
}
spin_unlock_irqrestore(&rq->lock, flags);
break;
Expand Down Expand Up @@ -6163,7 +6197,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpu_isset(cpu, rq->rd->span));
cpu_clear(cpu, rq->rd->online);
set_rq_offline(rq);
}
spin_unlock_irqrestore(&rq->lock, flags);
break;
Expand Down Expand Up @@ -6385,20 +6419,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
unsigned long flags;
const struct sched_class *class;

spin_lock_irqsave(&rq->lock, flags);

if (rq->rd) {
struct root_domain *old_rd = rq->rd;

for (class = sched_class_highest; class; class = class->next) {
if (class->leave_domain)
class->leave_domain(rq);
}
if (cpu_isset(rq->cpu, old_rd->online))
set_rq_offline(rq);

cpu_clear(rq->cpu, old_rd->span);
cpu_clear(rq->cpu, old_rd->online);

if (atomic_dec_and_test(&old_rd->refcount))
kfree(old_rd);
Expand All @@ -6409,12 +6439,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)

cpu_set(rq->cpu, rd->span);
if (cpu_isset(rq->cpu, cpu_online_map))
cpu_set(rq->cpu, rd->online);

for (class = sched_class_highest; class; class = class->next) {
if (class->join_domain)
class->join_domain(rq);
}
set_rq_online(rq);

spin_unlock_irqrestore(&rq->lock, flags);
}
Expand Down Expand Up @@ -7824,6 +7849,7 @@ void __init sched_init(void)
rq->next_balance = jiffies;
rq->push_cpu = 0;
rq->cpu = i;
rq->online = 0;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
rq_attach_root(rq, &def_root_domain);
Expand Down
24 changes: 18 additions & 6 deletions trunk/kernel/sched_rt.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@ static inline int rt_overloaded(struct rq *rq)

static inline void rt_set_overload(struct rq *rq)
{
if (!rq->online)
return;

cpu_set(rq->cpu, rq->rd->rto_mask);
/*
* Make sure the mask is visible before we set
Expand All @@ -26,6 +29,9 @@ static inline void rt_set_overload(struct rq *rq)

static inline void rt_clear_overload(struct rq *rq)
{
if (!rq->online)
return;

/* the order here really doesn't matter */
atomic_dec(&rq->rd->rto_count);
cpu_clear(rq->cpu, rq->rd->rto_mask);
Expand Down Expand Up @@ -394,7 +400,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
struct rq *rq = rq_of_rt_rq(rt_rq);
rt_rq->highest_prio = rt_se_prio(rt_se);
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_se_prio(rt_se));

if (rq->online)
cpupri_set(&rq->rd->cpupri, rq->cpu,
rt_se_prio(rt_se));
}
#endif
#ifdef CONFIG_SMP
Expand Down Expand Up @@ -448,7 +457,10 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

if (rt_rq->highest_prio != highest_prio) {
struct rq *rq = rq_of_rt_rq(rt_rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);

if (rq->online)
cpupri_set(&rq->rd->cpupri, rq->cpu,
rt_rq->highest_prio);
}

update_rt_migration(rq_of_rt_rq(rt_rq));
Expand Down Expand Up @@ -1154,7 +1166,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
static void rq_online_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_set_overload(rq);
Expand All @@ -1163,7 +1175,7 @@ static void join_domain_rt(struct rq *rq)
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
static void rq_offline_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_clear_overload(rq);
Expand Down Expand Up @@ -1331,8 +1343,8 @@ static const struct sched_class rt_sched_class = {
.load_balance = load_balance_rt,
.move_one_task = move_one_task_rt,
.set_cpus_allowed = set_cpus_allowed_rt,
.join_domain = join_domain_rt,
.leave_domain = leave_domain_rt,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
.post_schedule = post_schedule_rt,
.task_wake_up = task_wake_up_rt,
Expand Down

0 comments on commit d78717c

Please sign in to comment.