Commit 8e1c338
---
r: 76152
b: refs/heads/master
c: 637f508
h: refs/heads/master
v: v3
Gregory Haskins authored and Ingo Molnar committed Jan 25, 2008
1 parent 69017cb commit 8e1c338
Showing 3 changed files with 39 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 57d885fea0da0e9541d7730a9e1dcf734981a173
+refs/heads/master: 637f50851b57a32f7ec67c50fc16f1601ab1a87a
7 changes: 7 additions & 0 deletions trunk/kernel/sched.c
@@ -365,6 +365,13 @@ struct root_domain {
         atomic_t refcount;
         cpumask_t span;
         cpumask_t online;
+
+        /*
+         * The "RT overload" flag: it gets set if a CPU has more than
+         * one runnable RT task.
+         */
+        cpumask_t rto_mask;
+        atomic_t rto_count;
 };
 
 static struct root_domain def_root_domain;
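The hunk above is the core of the change: the RT-overload mask and counter, formerly file-scope globals in sched_rt.c, now live inside struct root_domain, so each root domain (exclusive cpuset) tracks its own set of overloaded CPUs. Below is a minimal user-space sketch of that scoping, assuming nothing beyond the diff itself; the toy_* names are invented for illustration and are not kernel code.

/* Illustrative sketch only -- simplified stand-ins for the root_domain fields. */
#include <stdio.h>

struct toy_root_domain {
        unsigned long rto_mask; /* one bit per CPU with more than one runnable RT task */
        int rto_count;          /* number of CPUs currently marked in rto_mask */
};

static void toy_set_overload(struct toy_root_domain *rd, int cpu)
{
        rd->rto_mask |= 1UL << cpu;
        rd->rto_count++;        /* the kernel orders this after the mask update */
}

int main(void)
{
        struct toy_root_domain rd_a = { 0, 0 }, rd_b = { 0, 0 };

        /* Marking CPU 2 overloaded in domain A leaves domain B untouched. */
        toy_set_overload(&rd_a, 2);

        printf("A: count=%d mask=%#lx   B: count=%d mask=%#lx\n",
               rd_a.rto_count, rd_a.rto_mask, rd_b.rto_count, rd_b.rto_mask);
        return 0;
}

Marking a CPU overloaded in one domain leaves the other domain's mask and count untouched, which is the isolation the per-domain fields buy.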
57 changes: 31 additions & 26 deletions trunk/kernel/sched_rt.c
@@ -5,22 +5,14 @@
 
 #ifdef CONFIG_SMP
 
-/*
- * The "RT overload" flag: it gets set if a CPU has more than
- * one runnable RT task.
- */
-static cpumask_t rt_overload_mask;
-static atomic_t rto_count;
-
-static inline int rt_overloaded(void)
+static inline int rt_overloaded(struct rq *rq)
 {
-        return atomic_read(&rto_count);
+        return atomic_read(&rq->rd->rto_count);
 }
 
 static inline void rt_set_overload(struct rq *rq)
 {
-        rq->rt.overloaded = 1;
-        cpu_set(rq->cpu, rt_overload_mask);
+        cpu_set(rq->cpu, rq->rd->rto_mask);
         /*
          * Make sure the mask is visible before we set
          * the overload count. That is checked to determine
@@ -29,23 +21,25 @@ static inline void rt_set_overload(struct rq *rq)
          * updated yet.
          */
         wmb();
-        atomic_inc(&rto_count);
+        atomic_inc(&rq->rd->rto_count);
 }
 
 static inline void rt_clear_overload(struct rq *rq)
 {
         /* the order here really doesn't matter */
-        atomic_dec(&rto_count);
-        cpu_clear(rq->cpu, rt_overload_mask);
-        rq->rt.overloaded = 0;
+        atomic_dec(&rq->rd->rto_count);
+        cpu_clear(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
 {
-        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
+        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                 rt_set_overload(rq);
-        else
+                rq->rt.overloaded = 1;
+        } else {
                 rt_clear_overload(rq);
+                rq->rt.overloaded = 0;
+        }
 }
 #endif /* CONFIG_SMP */
 
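The wmb() retained in rt_set_overload() orders the two updates: the bit in rto_mask must be visible before rto_count is incremented, so a remote CPU that observes a non-zero rto_count will also observe the bit it is about to scan for. The following is a rough user-space analogue of that publish/consume pairing using C11 atomics; it is not the kernel's primitives and not part of this commit, and the toy_* names are invented.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_rd {
        _Atomic unsigned long rto_mask;
        atomic_int rto_count;
};

static void toy_mark_overloaded(struct toy_rd *rd, int cpu)
{
        atomic_fetch_or_explicit(&rd->rto_mask, 1UL << cpu,
                                 memory_order_relaxed);
        /* analogue of the kernel's wmb(): publish the mask bit before the count */
        atomic_thread_fence(memory_order_release);
        atomic_fetch_add_explicit(&rd->rto_count, 1, memory_order_relaxed);
}

static bool toy_any_overload(struct toy_rd *rd, unsigned long *mask)
{
        if (atomic_load_explicit(&rd->rto_count, memory_order_relaxed) == 0)
                return false;   /* cheap early-out, as in pull_rt_task() */
        /* pairs with the release fence in toy_mark_overloaded() */
        atomic_thread_fence(memory_order_acquire);
        *mask = atomic_load_explicit(&rd->rto_mask, memory_order_relaxed);
        return true;
}

The reader side mirrors pull_rt_task() below: a cheap count check first, and only then a scan of the mask.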

@@ -306,7 +300,7 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
         int count = 0;
         int cpu;
 
-        cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);
+        cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
 
         /*
          * Scan each rq for the lowest prio.
@@ -580,18 +574,12 @@ static int pull_rt_task(struct rq *this_rq)
         struct task_struct *p, *next;
         struct rq *src_rq;
 
-        /*
-         * If cpusets are used, and we have overlapping
-         * run queue cpusets, then this algorithm may not catch all.
-         * This is just the price you pay on trying to keep
-         * dirtying caches down on large SMP machines.
-         */
-        if (likely(!rt_overloaded()))
+        if (likely(!rt_overloaded(this_rq)))
                 return 0;
 
         next = pick_next_task_rt(this_rq);
 
-        for_each_cpu_mask(cpu, rt_overload_mask) {
+        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
                 if (this_cpu == cpu)
                         continue;
 
@@ -811,6 +799,20 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
         }
 }
 
+/* Assumes rq->lock is held */
+static void join_domain_rt(struct rq *rq)
+{
+        if (rq->rt.overloaded)
+                rt_set_overload(rq);
+}
+
+/* Assumes rq->lock is held */
+static void leave_domain_rt(struct rq *rq)
+{
+        if (rq->rt.overloaded)
+                rt_clear_overload(rq);
+}
+
 static void set_curr_task_rt(struct rq *rq)
 {
         struct task_struct *p = rq->curr;
@@ -840,4 +842,7 @@ const struct sched_class rt_sched_class = {
 
         .set_curr_task = set_curr_task_rt,
         .task_tick = task_tick_rt,
+
+        .join_domain = join_domain_rt,
+        .leave_domain = leave_domain_rt,
 };
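The two hooks newly wired into rt_sched_class let a runqueue carry its overload contribution across root-domain changes: leave_domain_rt drops its bit from the old domain and join_domain_rt re-asserts it in the new one, both under rq->lock per the comments above. The caller that drives these hooks lives in kernel/sched.c and is not shown in this diff; the sketch below is a hypothetical, simplified model of that call pattern, with every toy_* type and the toy_attach_root() function invented for illustration.

/* Hypothetical caller-side sketch -- none of these are kernel types. */
#include <stddef.h>

struct toy_rq;

struct toy_sched_class {
        void (*join_domain)(struct toy_rq *rq);   /* optional hook */
        void (*leave_domain)(struct toy_rq *rq);  /* optional hook */
};

struct toy_root_domain { int rto_count; };

struct toy_rq {
        struct toy_root_domain *rd;
        const struct toy_sched_class *classes[2]; /* e.g. RT and fair */
};

/* Assumed to run with the runqueue lock held, as the hook comments require. */
static void toy_attach_root(struct toy_rq *rq, struct toy_root_domain *new_rd)
{
        size_t i;

        for (i = 0; i < 2; i++)
                if (rq->classes[i] && rq->classes[i]->leave_domain)
                        rq->classes[i]->leave_domain(rq); /* drop state from old domain */

        rq->rd = new_rd;                                  /* switch domains */

        for (i = 0; i < 2; i++)
                if (rq->classes[i] && rq->classes[i]->join_domain)
                        rq->classes[i]->join_domain(rq);  /* re-assert state in new domain */
}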
