Commit 265b180
---
yaml
---
r: 76130
b: refs/heads/master
c: 73fe6aa
h: refs/heads/master
v: v3
Gregory Haskins authored and Ingo Molnar committed Jan 25, 2008
1 parent cc15734 commit 265b180
Showing 6 changed files with 58 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: c7a1e46aa9782a947cf2ed506245d43396dbf991
+refs/heads/master: 73fe6aae84400e2b475e2a1dc4e8592cd3ed6e69
1 change: 1 addition & 0 deletions trunk/include/linux/init_task.h
@@ -130,6 +130,7 @@ extern struct group_info init_groups;
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.nr_cpus_allowed = NR_CPUS, \
.mm = NULL, \
.active_mm = &init_mm, \
.run_list = LIST_HEAD_INIT(tsk.run_list), \
2 changes: 2 additions & 0 deletions trunk/include/linux/sched.h
@@ -847,6 +847,7 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p);
void (*task_new) (struct rq *rq, struct task_struct *p);
void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
};

struct load_weight {
@@ -956,6 +957,7 @@ struct task_struct {

unsigned int policy;
cpumask_t cpus_allowed;
int nr_cpus_allowed;
unsigned int time_slice;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
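Note: the two sched.h additions work as a pair. nr_cpus_allowed caches how many CPUs the task's cpus_allowed mask permits, and the new sched_class::set_cpus_allowed hook lets a scheduling class intercept affinity changes so it can keep its own bookkeeping in step with that mask. A minimal sketch of what such a hook has to end up doing, using a made-up class name (illustrative only; the RT class's real implementation, set_cpus_allowed_rt(), is in the sched_rt.c hunk below):

static void set_cpus_allowed_myclass(struct task_struct *p, cpumask_t *new_mask)
{
	/* class-specific accounting for the affinity change would go here */

	p->cpus_allowed = *new_mask;                    /* publish the new mask */
	p->nr_cpus_allowed = cpus_weight(*new_mask);    /* cache its bit count  */
}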
1 change: 1 addition & 0 deletions trunk/kernel/fork.c
@@ -1242,6 +1242,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* parent's CPU). This avoids alot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
p->nr_cpus_allowed = current->nr_cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
9 changes: 8 additions & 1 deletion trunk/kernel/sched.c
@@ -343,6 +343,7 @@ struct rt_rq {
int rt_load_balance_idx;
struct list_head *rt_load_balance_head, *rt_load_balance_curr;
unsigned long rt_nr_running;
unsigned long rt_nr_migratory;
/* highest queued rt task prio */
int highest_prio;
};
@@ -5144,7 +5145,13 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
goto out;
}

-p->cpus_allowed = new_mask;
+if (p->sched_class->set_cpus_allowed)
+p->sched_class->set_cpus_allowed(p, &new_mask);
+else {
+p->cpus_allowed = new_mask;
+p->nr_cpus_allowed = cpus_weight(new_mask);
+}

/* Can the task run on the task's current CPU? If so, we're done */
if (cpu_isset(task_cpu(p), new_mask))
goto out;
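Note: set_cpus_allowed() now defers to the class hook when one is registered; otherwise it falls back to writing the mask itself and caching cpus_weight(new_mask), the number of set bits, into p->nr_cpus_allowed. A small illustrative fragment of that counting, assuming the cpumask helpers of this kernel era (example values only, not part of the patch):

static int example_nr_cpus_allowed(void)
{
	cpumask_t mask = CPU_MASK_NONE;

	cpu_set(0, mask);	/* task may run on CPU 0 */
	cpu_set(1, mask);	/* ... and on CPU 1      */

	/* cpus_weight() counts the set bits, so this returns 2;
	 * with nr_cpus_allowed > 1 the task counts as migratory. */
	return cpus_weight(mask);
}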
50 changes: 45 additions & 5 deletions trunk/kernel/sched_rt.c
@@ -33,6 +33,14 @@ static inline void rt_clear_overload(struct rq *rq)
atomic_dec(&rto_count);
cpu_clear(rq->cpu, rt_overload_mask);
}

static void update_rt_migration(struct rq *rq)
{
if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
rt_set_overload(rq);
else
rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
@@ -65,8 +73,10 @@ static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
#ifdef CONFIG_SMP
if (p->prio < rq->rt.highest_prio)
rq->rt.highest_prio = p->prio;
-if (rq->rt.rt_nr_running > 1)
-rt_set_overload(rq);
+if (p->nr_cpus_allowed > 1)
+rq->rt.rt_nr_migratory++;

+update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

@@ -88,8 +98,10 @@ static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
} /* otherwise leave rq->highest prio alone */
} else
rq->rt.highest_prio = MAX_RT_PRIO;
-if (rq->rt.rt_nr_running < 2)
-rt_clear_overload(rq);
+if (p->nr_cpus_allowed > 1)
+rq->rt.rt_nr_migratory--;

+update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

@@ -182,7 +194,8 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
-(cpu < 0 || cpu_isset(cpu, p->cpus_allowed)))
+(cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+(p->nr_cpus_allowed > 1))
return 1;
return 0;
}
@@ -584,6 +597,32 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
/* don't touch RT tasks */
return 0;
}
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
int weight = cpus_weight(*new_mask);

BUG_ON(!rt_task(p));

/*
* Update the migration status of the RQ if we have an RT task
* which is running AND changing its weight value.
*/
if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
struct rq *rq = task_rq(p);

if ((p->nr_cpus_allowed <= 1) && (weight > 1))
rq->rt.rt_nr_migratory++;
else if((p->nr_cpus_allowed > 1) && (weight <= 1)) {
BUG_ON(!rq->rt.rt_nr_migratory);
rq->rt.rt_nr_migratory--;
}

update_rt_migration(rq);
}

p->cpus_allowed = *new_mask;
p->nr_cpus_allowed = weight;
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq) do { } while (0)
# define schedule_balance_rt(rq, prev) do { } while (0)
@@ -637,6 +676,7 @@ const struct sched_class rt_sched_class = {
#ifdef CONFIG_SMP
.load_balance = load_balance_rt,
.move_one_task = move_one_task_rt,
.set_cpus_allowed = set_cpus_allowed_rt,
#endif

.set_curr_task = set_curr_task_rt,
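Note: the RT-class changes above replace the old rule "overloaded whenever rt_nr_running > 1" with one that also requires a task able to leave: rt_nr_migratory counts queued RT tasks with nr_cpus_allowed > 1, update_rt_migration() sets or clears the overload flag from it, and pick_rt_task() now skips tasks pinned to a single CPU. The new condition, restated as a hypothetical helper (illustrative only, not added by this patch):

/* A runqueue advertises RT overload only when more than one RT task is
 * queued AND at least one of them is allowed on more than one CPU. */
static inline int rt_rq_overloaded(struct rq *rq)
{
	return rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1);
}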
