sched: persistent average load per task
Remove the fall-back to SCHED_LOAD_SCALE by remembering the previous value of
cpu_avg_load_per_task(). This is useful because, under the hierarchical group
model, per-task weights can be much smaller than SCHED_LOAD_SCALE.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
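
For context, a standalone userspace sketch (not part of the commit; all
numbers invented) of why the old fall-back misleads under the group model:
a group's weight is split among its tasks, so with CONFIG_FAIR_GROUP_SCHED
a realistic per-task average can sit far below SCHED_LOAD_SCALE (1024), and
reporting 1024 for a momentarily idle runqueue overstates its load.
Remembering the last computed average keeps the value in the same range as
the tasks the runqueue actually hosted:

/* Standalone sketch, not kernel code; values are invented. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

int main(void)
{
	/* 16 group tasks split a group weight of 1024, so the last
	 * computed per-task average on this runqueue was ~64. */
	unsigned long cached_avg = 64;

	/* The runqueue is momentarily idle. */
	unsigned long load_weight = 0, nr_running = 0;

	/* Old behaviour: fall back to SCHED_LOAD_SCALE (1024). */
	unsigned long old_avg = nr_running ? load_weight / nr_running
					   : SCHED_LOAD_SCALE;

	/* New behaviour: return the remembered average (64). */
	unsigned long new_avg = nr_running ? load_weight / nr_running
					   : cached_avg;

	printf("fall-back: %lu, persistent: %lu\n", old_avg, new_avg);
	return 0;
}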
Peter Zijlstra authored and Ingo Molnar committed Jun 27, 2008
1 parent 039a1c4 commit a8a51d5
 kernel/sched.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -554,6 +554,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	unsigned long avg_load_per_task;
+
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
@@ -1427,9 +1429,18 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long cpu_avg_load_per_task(int cpu);
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
@@ -2010,18 +2021,6 @@ static unsigned long target_load(int cpu, int type)
 	return max(rq->cpu_load[type-1], total);
 }
 
-/*
- * Return the average load per task on the cpu's run queue
- */
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(cpu);
-	unsigned long n = rq->nr_running;
-
-	return n ? total / n : SCHED_LOAD_SCALE;
-}
-
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
