---
r: 13459
b: refs/heads/master
c: b910472
h: refs/heads/master
i:
  13457: ddf112c
  13455: 3026940
v: v3
Con Kolivas authored and Linus Torvalds committed Nov 9, 2005
1 parent cfc1d83 commit 8881525
Showing 2 changed files with 84 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: b54134be53be720da423692665ec215eb14a678b
refs/heads/master: b910472dd3b7c1d51af9a594a759f642520c33e1
101 changes: 83 additions & 18 deletions trunk/kernel/sched.c
@@ -206,6 +206,7 @@ struct runqueue {
*/
unsigned long nr_running;
#ifdef CONFIG_SMP
unsigned long prio_bias;
unsigned long cpu_load[3];
#endif
unsigned long long nr_switches;
@@ -659,13 +660,45 @@ static int effective_prio(task_t *p)
return prio;
}

#ifdef CONFIG_SMP
static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
{
rq->prio_bias += MAX_PRIO - static_prio;
}

static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
{
rq->prio_bias -= MAX_PRIO - static_prio;
}
#else
static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
{
}

static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
{
}
#endif

static inline void inc_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running++;
inc_prio_bias(rq, p->static_prio);
}

static inline void dec_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running--;
dec_prio_bias(rq, p->static_prio);
}

/*
* __activate_task - move a task to the runqueue.
*/
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
enqueue_task(p, rq->active);
rq->nr_running++;
inc_nr_running(p, rq);
}

/*
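
For illustration only (not part of the patch): a minimal standalone sketch of the arithmetic inc_prio_bias()/dec_prio_bias() implement, assuming the 2.6.14-era constants MAX_PRIO == 140 and NICE_TO_PRIO(nice) == 120 + nice. A higher-priority (lower nice) task contributes a larger share to the runqueue's prio_bias.

#include <stdio.h>

#define MAX_PRIO		140		/* assumed, as in 2.6.14 sched.h */
#define NICE_TO_PRIO(nice)	(120 + (nice))	/* assumed mapping */

/* Mirrors inc_prio_bias(): each queued task adds MAX_PRIO - static_prio. */
static int prio_bias_contribution(long nice)
{
	return MAX_PRIO - NICE_TO_PRIO(nice);
}

int main(void)
{
	printf("nice -20 -> %d\n", prio_bias_contribution(-20));	/* 40 */
	printf("nice   0 -> %d\n", prio_bias_contribution(0));		/* 20 */
	printf("nice  19 -> %d\n", prio_bias_contribution(19));	/* 1 */
	return 0;
}
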
@@ -674,7 +707,7 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
{
enqueue_task_head(p, rq->active);
rq->nr_running++;
inc_nr_running(p, rq);
}

static int recalc_task_prio(task_t *p, unsigned long long now)
@@ -793,7 +826,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
*/
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
rq->nr_running--;
dec_nr_running(p, rq);
dequeue_task(p, p->array);
p->array = NULL;
}
@@ -930,27 +963,54 @@ void kick_process(task_t *p)
* We want to under-estimate the load of migration sources, to
* balance conservatively.
*/
static inline unsigned long source_load(int cpu, int type)
static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
unsigned long cpu_load = rq->cpu_load[type-1],
load_now = rq->nr_running * SCHED_LOAD_SCALE;

if (idle == NOT_IDLE) {
/*
* If we are balancing busy runqueues the load is biased by
* priority to create 'nice' support across cpus.
*/
cpu_load *= rq->prio_bias;
load_now *= rq->prio_bias;
}

if (type == 0)
return load_now;

return min(rq->cpu_load[type-1], load_now);
return min(cpu_load, load_now);
}

static inline unsigned long source_load(int cpu, int type)
{
return __source_load(cpu, type, NOT_IDLE);
}

/*
* Return a high guess at the load of a migration-target cpu
*/
static inline unsigned long target_load(int cpu, int type)
static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
unsigned long cpu_load = rq->cpu_load[type-1],
load_now = rq->nr_running * SCHED_LOAD_SCALE;

if (type == 0)
return load_now;

return max(rq->cpu_load[type-1], load_now);
if (idle == NOT_IDLE) {
cpu_load *= rq->prio_bias;
load_now *= rq->prio_bias;
}
return max(cpu_load, load_now);
}

static inline unsigned long target_load(int cpu, int type)
{
return __target_load(cpu, type, NOT_IDLE);
}

/*
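
For illustration only (not part of the patch): a toy calculation of the NOT_IDLE branch in __source_load()/__target_load(), assuming SCHED_LOAD_SCALE == 128 and the prio_bias values from the sketch above. Two runqueues with the same number of runnable tasks report very different biased loads when their tasks have different nice levels, which is what steers the balancer toward the queue holding higher-priority work.

#include <stdio.h>

#define SCHED_LOAD_SCALE	128UL	/* assumed, as in 2.6.14 sched.h */

/* Mirrors only the load_now branch of __source_load() for idle == NOT_IDLE. */
static unsigned long biased_load(unsigned long nr_running, unsigned long prio_bias)
{
	unsigned long load_now = nr_running * SCHED_LOAD_SCALE;

	return load_now * prio_bias;
}

int main(void)
{
	/* cpu0: two nice 0 tasks (bias 20 + 20); cpu1: two nice -20 tasks (40 + 40) */
	printf("cpu0 biased load: %lu\n", biased_load(2, 40));	/* 10240 */
	printf("cpu1 biased load: %lu\n", biased_load(2, 80));	/* 20480 */
	return 0;
}
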
@@ -1411,7 +1471,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
list_add_tail(&p->run_list, &current->run_list);
p->array = current->array;
p->array->nr_active++;
rq->nr_running++;
inc_nr_running(p, rq);
}
set_need_resched();
} else
@@ -1756,9 +1816,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
{
dequeue_task(p, src_array);
src_rq->nr_running--;
dec_nr_running(p, src_rq);
set_task_cpu(p, this_cpu);
this_rq->nr_running++;
inc_nr_running(p, this_rq);
enqueue_task(p, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
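
For illustration only (not part of the patch): a small model of the bookkeeping pull_task() now does through dec_nr_running()/inc_nr_running(). The migrating task's bias contribution moves with it, so the total prio_bias summed over runqueues is unchanged by a migration (MAX_PRIO assumed as above).

#include <assert.h>

#define MAX_PRIO	140		/* assumed, as in 2.6.14 sched.h */

struct toy_rq { unsigned long nr_running, prio_bias; };

/* Mirrors the dec_nr_running()/inc_nr_running() pair in pull_task(). */
static void toy_pull(struct toy_rq *src, struct toy_rq *dst, int static_prio)
{
	src->nr_running--;
	src->prio_bias -= MAX_PRIO - static_prio;
	dst->nr_running++;
	dst->prio_bias += MAX_PRIO - static_prio;
}

int main(void)
{
	struct toy_rq a = { 2, 40 }, b = { 1, 20 };	/* three nice 0 tasks total */
	unsigned long total = a.prio_bias + b.prio_bias;

	toy_pull(&a, &b, 120);				/* migrate one nice 0 task */
	assert(a.prio_bias + b.prio_bias == total);
	return 0;
}
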
@@ -1937,9 +1997,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,

/* Bias balancing toward cpus of our domain */
if (local_group)
load = target_load(i, load_idx);
load = __target_load(i, load_idx, idle);
else
load = source_load(i, load_idx);
load = __source_load(i, load_idx, idle);

avg_load += load;
}
@@ -2044,14 +2104,15 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/*
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static runqueue_t *find_busiest_queue(struct sched_group *group)
static runqueue_t *find_busiest_queue(struct sched_group *group,
enum idle_type idle)
{
unsigned long load, max_load = 0;
runqueue_t *busiest = NULL;
int i;

for_each_cpu_mask(i, group->cpumask) {
load = source_load(i, 0);
load = __source_load(i, 0, idle);

if (load > max_load) {
max_load = load;
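
For illustration only (not part of the patch): because find_busiest_queue() now calls __source_load(i, 0, idle), the per-cpu pick is bias-aware whenever the balancing runqueue is busy. A hypothetical case where the choice flips: three nice +19 tasks outweigh two nice -10 tasks on raw load, but not once the loads are scaled by prio_bias (constants assumed as above).

#include <stdio.h>

#define SCHED_LOAD_SCALE	128UL	/* assumed, as in 2.6.14 sched.h */

/* Raw and bias-scaled load for one runqueue; mirrors the type == 0 path. */
static unsigned long raw_load(unsigned long nr)
{
	return nr * SCHED_LOAD_SCALE;
}

static unsigned long biased(unsigned long nr, unsigned long bias)
{
	return raw_load(nr) * bias;
}

int main(void)
{
	/* cpu0: three nice +19 tasks, bias 3 * 1; cpu1: two nice -10 tasks, bias 2 * 30 */
	printf("raw:    cpu0=%lu cpu1=%lu\n", raw_load(3), raw_load(2));	/* 384 vs 256 */
	printf("biased: cpu0=%lu cpu1=%lu\n", biased(3, 3), biased(2, 60));	/* 1152 vs 15360 */
	return 0;
}
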
@@ -2095,7 +2156,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
goto out_balanced;
}

busiest = find_busiest_queue(group);
busiest = find_busiest_queue(group, idle);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
@@ -2218,7 +2279,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
goto out_balanced;
}

busiest = find_busiest_queue(group);
busiest = find_busiest_queue(group, NEWLY_IDLE);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
goto out_balanced;
@@ -3447,7 +3508,9 @@ void set_user_nice(task_t *p, long nice)
* not SCHED_NORMAL:
*/
if (rt_task(p)) {
dec_prio_bias(rq, p->static_prio);
p->static_prio = NICE_TO_PRIO(nice);
inc_prio_bias(rq, p->static_prio);
goto out_unlock;
}
array = p->array;
@@ -3457,7 +3520,9 @@ void set_user_nice(task_t *p, long nice)
old_prio = p->prio;
new_prio = NICE_TO_PRIO(nice);
delta = new_prio - old_prio;
dec_prio_bias(rq, p->static_prio);
p->static_prio = NICE_TO_PRIO(nice);
inc_prio_bias(rq, p->static_prio);
p->prio += delta;

if (array) {
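
For illustration only (not part of the patch): why set_user_nice() now wraps the static_prio update in dec_prio_bias()/inc_prio_bias(). A queued task that is reniced would otherwise leave its old contribution in the runqueue's prio_bias; the sketch below (same assumed constants) shows the bookkeeping.

#include <stdio.h>

#define MAX_PRIO		140		/* assumed, as in 2.6.14 sched.h */
#define NICE_TO_PRIO(nice)	(120 + (nice))	/* assumed mapping */

struct toy_rq { unsigned long prio_bias; };

/* Mirrors only the bias bookkeeping from set_user_nice(). */
static void toy_set_nice(struct toy_rq *rq, int *static_prio, long nice)
{
	rq->prio_bias -= MAX_PRIO - *static_prio;	/* dec_prio_bias() */
	*static_prio = NICE_TO_PRIO(nice);
	rq->prio_bias += MAX_PRIO - *static_prio;	/* inc_prio_bias() */
}

int main(void)
{
	struct toy_rq rq = { .prio_bias = 20 };		/* one queued nice 0 task */
	int static_prio = NICE_TO_PRIO(0);

	toy_set_nice(&rq, &static_prio, 19);
	printf("prio_bias after renice to +19: %lu\n", rq.prio_bias);	/* 1 */
	return 0;
}
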
