sched: Move nr_cpus_allowed out of 'struct sched_rt_entity'
Since nr_cpus_allowed is used outside of sched/rt.c, and is wanted outside
of it even more, move it to a more natural site: task_struct itself.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-kr61f02y9brwzkh6x53pdptm@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra authored and Ingo Molnar committed May 30, 2012
1 parent b654f7d commit 29baa74
Showing 6 changed files with 26 additions and 20 deletions.
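In effect, everything that used to reach through the task's RT scheduling entity (p->rt.nr_cpus_allowed) now reads the count directly off the task (p->nr_cpus_allowed), as the hunks below show. For illustration only, a toy, self-contained C sketch of the layout change (struct definitions invented and heavily simplified here, not the real kernel types):

        #include <stdio.h>

        /* Toy stand-ins for the kernel structs; illustration only. */
        struct sched_rt_entity {
                unsigned int time_slice;   /* nr_cpus_allowed used to live here */
        };

        struct task_struct {
                int nr_cpus_allowed;       /* its new, more widely visible home */
                struct sched_rt_entity rt;
        };

        int main(void)
        {
                struct task_struct p = { .nr_cpus_allowed = 4 };

                /* Before this commit the test read: p.rt.nr_cpus_allowed > 1 */
                if (p.nr_cpus_allowed > 1)
                        printf("task can migrate across %d CPUs\n",
                               p.nr_cpus_allowed);
                return 0;
        }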
2 changes: 1 addition & 1 deletion arch/blackfin/kernel/process.c
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
         unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-        if (current->rt.nr_cpus_allowed == num_possible_cpus())
+        if (current->nr_cpus_allowed == num_possible_cpus())
                 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
2 changes: 1 addition & 1 deletion include/linux/init_task.h
@@ -149,6 +149,7 @@ extern struct cred init_cred;
         .normal_prio    = MAX_PRIO-20,                          \
         .policy         = SCHED_NORMAL,                         \
         .cpus_allowed   = CPU_MASK_ALL,                         \
+        .nr_cpus_allowed= NR_CPUS,                              \
         .mm             = NULL,                                 \
         .active_mm      = &init_mm,                             \
         .se             = {                                     \
@@ -157,7 +158,6 @@ extern struct cred init_cred;
         .rt             = {                                     \
                 .run_list = LIST_HEAD_INIT(tsk.rt.run_list),    \
                 .time_slice = RR_TIMESLICE,                     \
-                .nr_cpus_allowed = NR_CPUS,                     \
         },                                                      \
         .tasks          = LIST_HEAD_INIT(tsk.tasks),            \
         INIT_PUSHABLE_TASKS(tsk)                                \
2 changes: 1 addition & 1 deletion include/linux/sched.h
@@ -1188,7 +1188,6 @@ struct sched_rt_entity {
         struct list_head run_list;
         unsigned long timeout;
         unsigned int time_slice;
-        int nr_cpus_allowed;
 
         struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1253,6 +1252,7 @@ struct task_struct {
 #endif
 
         unsigned int policy;
+        int nr_cpus_allowed;
         cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
2 changes: 1 addition & 1 deletion kernel/sched/core.c
@@ -5015,7 +5015,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                 p->sched_class->set_cpus_allowed(p, new_mask);
 
         cpumask_copy(&p->cpus_allowed, new_mask);
-        p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+        p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
2 changes: 1 addition & 1 deletion kernel/sched/fair.c
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
         int want_sd = 1;
         int sync = wake_flags & WF_SYNC;
 
-        if (p->rt.nr_cpus_allowed == 1)
+        if (p->nr_cpus_allowed == 1)
                 return prev_cpu;
 
         if (sd_flag & SD_BALANCE_WAKE) {
36 changes: 21 additions & 15 deletions kernel/sched/rt.c
@@ -274,27 +274,33 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+        struct task_struct *p;
+
         if (!rt_entity_is_task(rt_se))
                 return;
 
+        p = rt_task_of(rt_se);
         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
         rt_rq->rt_nr_total++;
-        if (rt_se->nr_cpus_allowed > 1)
+        if (p->nr_cpus_allowed > 1)
                 rt_rq->rt_nr_migratory++;
 
         update_rt_migration(rt_rq);
 }
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+        struct task_struct *p;
+
         if (!rt_entity_is_task(rt_se))
                 return;
 
+        p = rt_task_of(rt_se);
         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
         rt_rq->rt_nr_total--;
-        if (rt_se->nr_cpus_allowed > 1)
+        if (p->nr_cpus_allowed > 1)
                 rt_rq->rt_nr_migratory--;
 
         update_rt_migration(rt_rq);
@@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
         enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-        if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                 enqueue_pushable_task(rq, p);
 
         inc_nr_running(rq);
@@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
         cpu = task_cpu(p);
 
-        if (p->rt.nr_cpus_allowed == 1)
+        if (p->nr_cpus_allowed == 1)
                 goto out;
 
         /* For anything but wake ups, just return the task_cpu */
@@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
          * will have to sort it out.
          */
         if (curr && unlikely(rt_task(curr)) &&
-            (curr->rt.nr_cpus_allowed < 2 ||
+            (curr->nr_cpus_allowed < 2 ||
              curr->prio <= p->prio) &&
-            (p->rt.nr_cpus_allowed > 1)) {
+            (p->nr_cpus_allowed > 1)) {
                 int target = find_lowest_rq(p);
 
                 if (target != -1)
@@ -1276,10 +1282,10 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-        if (rq->curr->rt.nr_cpus_allowed == 1)
+        if (rq->curr->nr_cpus_allowed == 1)
                 return;
 
-        if (p->rt.nr_cpus_allowed != 1
+        if (p->nr_cpus_allowed != 1
             && cpupri_find(&rq->rd->cpupri, p, NULL))
                 return;
 
@@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
          * The previous task needs to be made eligible for pushing
          * if it is still active
          */
-        if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
+        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                 enqueue_pushable_task(rq, p);
 }
 
@@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
         if (!task_running(rq, p) &&
             (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-            (p->rt.nr_cpus_allowed > 1))
+            (p->nr_cpus_allowed > 1))
                 return 1;
         return 0;
 }
@@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task)
         if (unlikely(!lowest_mask))
                 return -1;
 
-        if (task->rt.nr_cpus_allowed == 1)
+        if (task->nr_cpus_allowed == 1)
                 return -1; /* No other targets possible */
 
         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
         BUG_ON(rq->cpu != task_cpu(p));
         BUG_ON(task_current(rq, p));
-        BUG_ON(p->rt.nr_cpus_allowed <= 1);
+        BUG_ON(p->nr_cpus_allowed <= 1);
 
         BUG_ON(!p->on_rq);
         BUG_ON(!rt_task(p));
@@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
         if (!task_running(rq, p) &&
             !test_tsk_need_resched(rq->curr) &&
             has_pushable_tasks(rq) &&
-            p->rt.nr_cpus_allowed > 1 &&
+            p->nr_cpus_allowed > 1 &&
             rt_task(rq->curr) &&
-            (rq->curr->rt.nr_cpus_allowed < 2 ||
+            (rq->curr->nr_cpus_allowed < 2 ||
              rq->curr->prio <= p->prio))
                 push_rt_tasks(rq);
 }
@@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
          * Only update if the process changes its state from whether it
          * can migrate or not.
          */
-        if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+        if ((p->nr_cpus_allowed > 1) == (weight > 1))
                 return;
 
         rq = task_rq(p);
