Commit

---
r: 205416
b: refs/heads/master
c: 95ae3c5
h: refs/heads/master
v: v3
Ingo Molnar committed Jun 8, 2010
1 parent 0fa3b09 commit 0b5d338
Showing 3 changed files with 74 additions and 63 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 21aa9af03d06cb1d19a3738e5cf12acff984e69b
+refs/heads/master: 95ae3c59fa8ad616c73745e21154b5af0fb10168
20 changes: 14 additions & 6 deletions trunk/include/linux/cgroup.h
@@ -525,13 +525,21 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
return cgrp->subsys[subsys_id];
}

-static inline struct cgroup_subsys_state *task_subsys_state(
-struct task_struct *task, int subsys_id)
+/*
+ * function to get the cgroup_subsys_state which allows for extra
+ * rcu_dereference_check() conditions, such as locks used during the
+ * cgroup_subsys::attach() methods.
+ */
+#define task_subsys_state_check(task, subsys_id, __c) \
+rcu_dereference_check(task->cgroups->subsys[subsys_id], \
+rcu_read_lock_held() || \
+lockdep_is_held(&task->alloc_lock) || \
+cgroup_lock_is_held() || (__c))
+
+static inline struct cgroup_subsys_state *
+task_subsys_state(struct task_struct *task, int subsys_id)
{
-return rcu_dereference_check(task->cgroups->subsys[subsys_id],
-rcu_read_lock_held() ||
-lockdep_is_held(&task->alloc_lock) ||
-cgroup_lock_is_held());
+return task_subsys_state_check(task, subsys_id, false);
}

static inline struct cgroup* task_cgroup(struct task_struct *task,
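The new task_subsys_state_check() keeps the default verification conditions (rcu_read_lock(), task->alloc_lock, cgroup_mutex) and lets a caller append one of its own. A minimal sketch of that intended use follows; it is not part of this commit, and my_lock is a hypothetical lock that the caller is assumed to know pins the task's cgroup (cpu_cgroup_subsys_id is used only for concreteness):

#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical lock; assumed to pin the task's cgroup while held. */
static DEFINE_SPINLOCK(my_lock);

static struct cgroup_subsys_state *my_task_css(struct task_struct *task)
{
	/*
	 * PROVE_RCU is satisfied when RCU, task->alloc_lock or cgroup_mutex
	 * is held, or when the caller holds my_lock.
	 */
	return task_subsys_state_check(task, cpu_cgroup_subsys_id,
				       lockdep_is_held(&my_lock));
}

The sched.c change below applies the same pattern from task_group(), passing lockdep_is_held(&task_rq(p)->lock) as the extra condition.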
115 changes: 59 additions & 56 deletions trunk/kernel/sched.c
@@ -307,52 +307,6 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD;
*/
struct task_group init_task_group;

-/* return group to which a task belongs */
-static inline struct task_group *task_group(struct task_struct *p)
-{
-struct task_group *tg;
-
-#ifdef CONFIG_CGROUP_SCHED
-tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
-struct task_group, css);
-#else
-tg = &init_task_group;
-#endif
-return tg;
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-/*
-* Strictly speaking this rcu_read_lock() is not needed since the
-* task_group is tied to the cgroup, which in turn can never go away
-* as long as there are tasks attached to it.
-*
-* However since task_group() uses task_subsys_state() which is an
-* rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
-*/
-rcu_read_lock();
-#ifdef CONFIG_FAIR_GROUP_SCHED
-p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-p->se.parent = task_group(p)->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-p->rt.rt_rq = task_group(p)->rt_rq[cpu];
-p->rt.parent = task_group(p)->rt_se[cpu];
-#endif
-rcu_read_unlock();
-}
-
-#else
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
-return NULL;
-}
-
#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
@@ -645,6 +599,49 @@ static inline int cpu_of(struct rq *rq)
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))

+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+* Return the group to which this tasks belongs.
+*
+* We use task_subsys_state_check() and extend the RCU verification
+* with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
+* holds that lock for each task it moves into the cgroup. Therefore
+* by holding that lock, we pin the task to the current cgroup.
+*/
+static inline struct task_group *task_group(struct task_struct *p)
+{
+struct cgroup_subsys_state *css;
+
+css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+lockdep_is_held(&task_rq(p)->lock));
+return container_of(css, struct task_group, css);
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+p->se.parent = task_group(p)->se[cpu];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+p->rt.rt_rq = task_group(p)->rt_rq[cpu];
+p->rt.parent = task_group(p)->rt_se[cpu];
+#endif
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
inline void update_rq_clock(struct rq *rq)
{
if (!rq->skip_clock_update)
@@ -4529,16 +4526,6 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
}

if (user) {
-#ifdef CONFIG_RT_GROUP_SCHED
-/*
-* Do not allow realtime tasks into groups that have no runtime
-* assigned.
-*/
-if (rt_bandwidth_enabled() && rt_policy(policy) &&
-task_group(p)->rt_bandwidth.rt_runtime == 0)
-return -EPERM;
-#endif
-
retval = security_task_setscheduler(p, policy, param);
if (retval)
return retval;
@@ -4554,6 +4541,22 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
* runqueue lock must be held.
*/
rq = __task_rq_lock(p);
+
+#ifdef CONFIG_RT_GROUP_SCHED
+if (user) {
+/*
+* Do not allow realtime tasks into groups that have no runtime
+* assigned.
+*/
+if (rt_bandwidth_enabled() && rt_policy(policy) &&
+task_group(p)->rt_bandwidth.rt_runtime == 0) {
+__task_rq_unlock(rq);
+raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+return -EPERM;
+}
+}
+#endif
+
/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
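Taken together, the sched.c side establishes one rule: hold task_rq(p)->lock and task_group(p) is both stable (cpu_cgroup_attach() acquires the same lock when moving a task) and PROVE_RCU-clean, without rcu_read_lock(). A minimal sketch of that pattern, assuming it lives in kernel/sched.c of this tree (where task_rq_lock()/task_rq_unlock() and struct task_group are visible) and CONFIG_RT_GROUP_SCHED=y; rt_runtime_of() is a hypothetical helper, not part of this commit:

#ifdef CONFIG_RT_GROUP_SCHED
/* Read the RT runtime of p's group while the rq lock pins the group. */
static u64 rt_runtime_of(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 runtime;

	rq = task_rq_lock(p, &flags);	/* pins p to its current cgroup */
	runtime = task_group(p)->rt_bandwidth.rt_runtime;
	task_rq_unlock(rq, &flags);

	return runtime;
}
#endif

This is the same reasoning behind moving the RT bandwidth check in __sched_setscheduler() under __task_rq_lock(): the check now reads task_group(p) with the group pinned, and the error path drops both rq->lock and p->pi_lock before returning -EPERM.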
