sched: preparatory code movement
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Apr 19, 2008
1 parent b40b2e8 commit b758149
Showing 1 changed file with 92 additions and 98 deletions.

kernel/sched_fair.c
@@ -77,6 +77,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  * CFS operations on generic schedulable entities:
  */
 
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+	return container_of(se, struct task_struct, se);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* cpu runqueue to which this cfs_rq is attached */
@@ -88,6 +93,54 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 /* An entity is a task if it doesn't "own" a runqueue */
 #define entity_is_task(se)	(!se->my_q)
 
+/* Walk up scheduling entities hierarchy */
+#define for_each_sched_entity(se) \
+		for (; se; se = se->parent)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+	return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return grp->my_q;
+}
+
+/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu')
+ */
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+	return cfs_rq->tg->cfs_rq[this_cpu];
+}
+
+/* Iterate thr' all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+	if (se->cfs_rq == pse->cfs_rq)
+		return 1;
+
+	return 0;
+}
+
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+	return se->parent;
+}
+
 #else	/* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -97,13 +150,49 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 
 #define entity_is_task(se)	1
 
-#endif	/* CONFIG_FAIR_GROUP_SCHED */
+#define for_each_sched_entity(se) \
+		for (; se; se = NULL)
 
-static inline struct task_struct *task_of(struct sched_entity *se)
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 {
-	return container_of(se, struct task_struct, se);
+	return &task_rq(p)->cfs;
 }
 
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+	struct task_struct *p = task_of(se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+	return NULL;
+}
+
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+	return &cpu_rq(this_cpu)->cfs;
+}
+
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+	return 1;
+}
+
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+	return NULL;
+}
+
+#endif	/* CONFIG_FAIR_GROUP_SCHED */
+
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -699,101 +788,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  * CFS operations on tasks:
  */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* Walk up scheduling entities hierarchy */
-#define for_each_sched_entity(se) \
-		for (; se; se = se->parent)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-	return p->se.cfs_rq;
-}
-
-/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-	return se->cfs_rq;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-	return grp->my_q;
-}
-
-/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
- * another cpu ('this_cpu')
- */
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-	return cfs_rq->tg->cfs_rq[this_cpu];
-}
-
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
-
-/* Do the two (enqueued) entities belong to the same group ? */
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-	if (se->cfs_rq == pse->cfs_rq)
-		return 1;
-
-	return 0;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
-	return se->parent;
-}
-
-#else	/* CONFIG_FAIR_GROUP_SCHED */
-
-#define for_each_sched_entity(se) \
-		for (; se; se = NULL)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
-	return &task_rq(p)->cfs;
-}
-
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
-	struct task_struct *p = task_of(se);
-	struct rq *rq = task_rq(p);
-
-	return &rq->cfs;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
-	return NULL;
-}
-
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-	return &cpu_rq(this_cpu)->cfs;
-}
-
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-	return 1;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
-	return NULL;
-}
-
-#endif	/* CONFIG_FAIR_GROUP_SCHED */
-
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
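
The task_of() helper hoisted to the top of the file maps an embedded sched_entity back to its enclosing task_struct via container_of(). Below is a minimal, self-contained sketch of that pattern; the simplified container_of() (the kernel's version adds type checking) and the toy struct fields are illustrative assumptions, not kernel code:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): subtract the member's offset from the
 * member pointer to recover a pointer to the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity { int vruntime; };

struct task_struct {
	int pid;			/* toy field for demonstration */
	struct sched_entity se;		/* entity embedded in the task */
};

static struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

int main(void)
{
	struct task_struct t = { .pid = 42 };
	struct sched_entity *se = &t.se;

	/* Recover the enclosing task from its embedded entity. */
	printf("pid = %d\n", task_of(se)->pid);
	return 0;
}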
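The two for_each_sched_entity() definitions are what let the rest of sched_fair.c stay configuration-agnostic: with CONFIG_FAIR_GROUP_SCHED the loop climbs the ->parent chain up the group hierarchy, and without it the body runs exactly once for the task's own entity. A toy sketch of that behavior, with invented entity names and a simplified struct layout (both assumptions for illustration):

#include <stdio.h>

struct entity { const char *name; struct entity *parent; };

/* Group scheduling: walk up the hierarchy via ->parent. */
#define for_each_sched_entity_group(se) \
	for (; se; se = se->parent)

/* No group scheduling: visit the entity itself, exactly once. */
#define for_each_sched_entity_flat(se) \
	for (; se; se = NULL)

int main(void)
{
	struct entity root  = { "root",  NULL };
	struct entity group = { "group", &root };
	struct entity task  = { "task",  &group };
	struct entity *se;

	se = &task;
	for_each_sched_entity_group(se)
		printf("group walk: %s\n", se->name);	/* task, group, root */

	se = &task;
	for_each_sched_entity_flat(se)
		printf("flat walk:  %s\n", se->name);	/* task only */

	return 0;
}

Because both variants share one spelling, callers such as enqueue and dequeue paths can be written once against the macro, which is what makes moving these helpers ahead of their users a purely mechanical change.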