
Commit

---
r: 87478
b: refs/heads/master
c: 4ae7d5c
h: refs/heads/master
v: v3
Ingo Molnar committed Mar 19, 2008
1 parent e24cf6f commit e573410
Showing 5 changed files with 51 additions and 18 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6f3d09291b4982991680b61763b2541e53e2a95f
+refs/heads/master: 4ae7d5cefd4aa3560e359a3b0f03e12adc8b5c86
3 changes: 3 additions & 0 deletions trunk/include/linux/sched.h
@@ -929,6 +929,9 @@ struct sched_entity {
         u64 vruntime;
         u64 prev_sum_exec_runtime;
 
+        u64 last_wakeup;
+        u64 avg_overlap;
+
 #ifdef CONFIG_SCHEDSTATS
         u64 wait_start;
         u64 wait_max;
5 changes: 4 additions & 1 deletion trunk/kernel/sched.c
@@ -1855,10 +1855,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
                 schedstat_inc(p, se.nr_wakeups_remote);
         update_rq_clock(rq);
         activate_task(rq, p, 1);
-        check_preempt_curr(rq, p);
         success = 1;
 
 out_running:
+        check_preempt_curr(rq, p);
+
         p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
         if (p->sched_class->task_wake_up)
@@ -1892,6 +1893,8 @@ static void __sched_fork(struct task_struct *p)
         p->se.exec_start = 0;
         p->se.sum_exec_runtime = 0;
         p->se.prev_sum_exec_runtime = 0;
+        p->se.last_wakeup = 0;
+        p->se.avg_overlap = 0;
 
 #ifdef CONFIG_SCHEDSTATS
         p->se.wait_start = 0;
1 change: 1 addition & 0 deletions trunk/kernel/sched_debug.c
@@ -288,6 +288,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
         PN(se.exec_start);
         PN(se.vruntime);
         PN(se.sum_exec_runtime);
+        PN(se.avg_overlap);
 
         nr_switches = p->nvcsw + p->nivcsw;
 
58 changes: 42 additions & 16 deletions trunk/kernel/sched_fair.c
@@ -556,6 +556,21 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
         account_entity_enqueue(cfs_rq, se);
 }
 
+static void update_avg(u64 *avg, u64 sample)
+{
+        s64 diff = sample - *avg;
+        *avg += diff >> 3;
+}
+
+static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+        if (!se->last_wakeup)
+                return;
+
+        update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
+        se->last_wakeup = 0;
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -566,6 +581,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
         update_stats_dequeue(cfs_rq, se);
         if (sleep) {
+                update_avg_stats(cfs_rq, se);
 #ifdef CONFIG_SCHEDSTATS
                 if (entity_is_task(se)) {
                         struct task_struct *tsk = task_of(se);
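
Reading note (illustration, not part of the commit): update_avg() above is a shift-based exponential moving average, so each new sample is folded in with a weight of 1/8 (the ">> 3"), and update_avg_stats(), called from dequeue_entity() when an entity goes to sleep, feeds it the CPU time the entity accumulated since it last issued a wakeup (se->sum_exec_runtime - se->last_wakeup); last_wakeup itself is stamped in check_preempt_wakeup() further down. The standalone userspace sketch below only demonstrates the averaging behaviour; the steady 100 us sample is an arbitrary assumption and nothing in it is kernel API.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the kernel's update_avg(), on userspace types. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
        int64_t diff = sample - *avg;

        *avg += diff >> 3;              /* avg += (sample - avg) / 8 */
}

int main(void)
{
        uint64_t avg_overlap = 0;
        int i;

        /*
         * Pretend the task always runs for 100 us (100000 ns) between
         * waking its partner and going back to sleep.
         */
        for (i = 1; i <= 32; i++) {
                update_avg(&avg_overlap, 100000);
                if (i % 8 == 0)
                        printf("after %2d samples: avg_overlap = %llu ns\n",
                               i, (unsigned long long)avg_overlap);
        }
        return 0;
}

With a 1/8 weight the average reaches roughly two thirds of a steady input after eight samples, so avg_overlap tracks recent behaviour while smoothing over one-off outliers.
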
@@ -981,23 +997,31 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 #ifdef CONFIG_SMP
 
+static const struct sched_class fair_sched_class;
+
 static int
-wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
-            int prev_cpu, int this_cpu, int sync, int idx,
-            unsigned long load, unsigned long this_load,
+wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
+            struct task_struct *p, int prev_cpu, int this_cpu, int sync,
+            int idx, unsigned long load, unsigned long this_load,
             unsigned int imbalance)
 {
+        struct task_struct *curr = this_rq->curr;
         unsigned long tl = this_load;
         unsigned long tl_per_task;
 
         if (!(this_sd->flags & SD_WAKE_AFFINE))
                 return 0;
 
         /*
-         * Attract cache-cold tasks on sync wakeups:
+         * If the currently running task will sleep within
+         * a reasonable amount of time then attract this newly
+         * woken task:
          */
-        if (sync && !task_hot(p, rq->clock, this_sd))
-                return 1;
+        if (sync && curr->sched_class == &fair_sched_class) {
+                if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
+                        p->se.avg_overlap < sysctl_sched_migration_cost)
+                        return 1;
+        }
 
         schedstat_inc(p, se.nr_wakeups_affine_attempts);
         tl_per_task = cpu_avg_load_per_task(this_cpu);
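
Reading note (illustration, not part of the commit): the sync-wakeup shortcut no longer asks whether the woken task is cache-cold (the old task_hot() test); it asks whether both the waker (curr, the task running on this CPU) and the wakee p habitually go back to sleep soon after issuing a wakeup, i.e. whether both avg_overlap values are below sysctl_sched_migration_cost. The sketch below isolates that decision as a plain predicate; MIGRATION_COST_NS is an assumed stand-in for the sysctl's value and the numbers in main() are invented.

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-in for sysctl_sched_migration_cost (0.5 ms). */
#define MIGRATION_COST_NS 500000ULL

/*
 * The new wake_affine() fast path, in isolation: pull the wakee onto the
 * waker's CPU only for sync wakeups where both sides tend to run briefly
 * after a wakeup (small avg_overlap).
 */
static int sync_wakeup_is_affine(int sync,
                                 uint64_t waker_avg_overlap,
                                 uint64_t wakee_avg_overlap)
{
        if (!sync)
                return 0;

        return waker_avg_overlap < MIGRATION_COST_NS &&
               wakee_avg_overlap < MIGRATION_COST_NS;
}

int main(void)
{
        /* Pipe-like pair: both sides run ~100 us per wakeup, so attract. */
        printf("short/short -> %d\n", sync_wakeup_is_affine(1, 100000, 120000));
        /* CPU hog waking a helper: the waker rarely sleeps soon, so don't. */
        printf("hog/short   -> %d\n", sync_wakeup_is_affine(1, 5000000, 120000));
        return 0;
}

In the kernel code the check is additionally gated on curr being managed by the fair class (curr->sched_class == &fair_sched_class), since avg_overlap is only maintained for CFS entities.
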
@@ -1030,18 +1054,16 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
         struct sched_domain *sd, *this_sd = NULL;
         int prev_cpu, this_cpu, new_cpu;
         unsigned long load, this_load;
+        struct rq *rq, *this_rq;
         unsigned int imbalance;
-        struct rq *rq;
         int idx;
 
         prev_cpu = task_cpu(p);
         rq = task_rq(p);
         this_cpu = smp_processor_id();
+        this_rq = cpu_rq(this_cpu);
         new_cpu = prev_cpu;
 
-        if (prev_cpu == this_cpu)
-                goto out;
-
         /*
          * 'this_sd' is the first domain that both
          * this_cpu and prev_cpu are present in:
@@ -1069,11 +1091,12 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
         load = source_load(prev_cpu, idx);
         this_load = target_load(this_cpu, idx);
 
-        if (wake_affine(rq, this_sd, p, prev_cpu, this_cpu, sync, idx,
-                        load, this_load, imbalance)) {
-                new_cpu = this_cpu;
+        if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
+                        load, this_load, imbalance))
+                return this_cpu;
+
+        if (prev_cpu == this_cpu)
                 goto out;
-        }
 
         /*
          * Start passive balancing when half the imbalance_pct
@@ -1083,8 +1106,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
                 if (imbalance*this_load <= 100*load) {
                         schedstat_inc(this_sd, ttwu_move_balance);
                         schedstat_inc(p, se.nr_wakeups_passive);
-                        new_cpu = this_cpu;
-                        goto out;
+                        return this_cpu;
                 }
         }

@@ -1111,6 +1133,10 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                 return;
         }
 
+        se->last_wakeup = se->sum_exec_runtime;
+        if (unlikely(se == pse))
+                return;
+
         cfs_rq_of(pse)->next = pse;
 
         /*
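
Reading note: in check_preempt_wakeup(), se is the scheduling entity of the currently running task (the waker) and pse that of the newly woken p, so last_wakeup stamps the waker's own sum_exec_runtime at the moment it wakes someone. Because both the stamp and the later subtraction in update_avg_stats() use sum_exec_runtime, the overlap sample is CPU time the waker kept running after the wakeup rather than wall-clock time: for example, a task that wakes its partner at sum_exec_runtime = 10,000,000 ns and sleeps again at 10,100,000 ns contributes a 100,000 ns sample. The unlikely(se == pse) test simply skips the buddy and preemption handling when a task wakes itself.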
