---
yaml
---
r: 191411
b: refs/heads/master
c: 74f5187
h: refs/heads/master
i:
  191409: 9e62642
  191407: 567cf02
v: v3
Peter Zijlstra authored and Ingo Molnar committed Apr 23, 2010
1 parent 0762ae9 commit deaf848
Showing 3 changed files with 69 additions and 16 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 09a40af5240de02d848247ab82440ad75b31ab11
+refs/heads/master: 74f5187ac873042f502227701ed1727e7c5fbfa9
80 changes: 67 additions & 13 deletions trunk/kernel/sched.c
@@ -1815,7 +1815,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 }
 #endif
 
-static void calc_load_account_active(struct rq *this_rq);
+static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
 
@@ -2950,6 +2950,61 @@ static unsigned long calc_load_update;
 unsigned long avenrun[3];
 EXPORT_SYMBOL(avenrun);
 
+static long calc_load_fold_active(struct rq *this_rq)
+{
+        long nr_active, delta = 0;
+
+        nr_active = this_rq->nr_running;
+        nr_active += (long) this_rq->nr_uninterruptible;
+
+        if (nr_active != this_rq->calc_load_active) {
+                delta = nr_active - this_rq->calc_load_active;
+                this_rq->calc_load_active = nr_active;
+        }
+
+        return delta;
+}
+
+#ifdef CONFIG_NO_HZ
+/*
+ * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+static atomic_long_t calc_load_tasks_idle;
+
+static void calc_load_account_idle(struct rq *this_rq)
+{
+        long delta;
+
+        delta = calc_load_fold_active(this_rq);
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks_idle);
+}
+
+static long calc_load_fold_idle(void)
+{
+        long delta = 0;
+
+        /*
+         * Its got a race, we don't care...
+         */
+        if (atomic_long_read(&calc_load_tasks_idle))
+                delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+
+        return delta;
+}
+#else
+static void calc_load_account_idle(struct rq *this_rq)
+{
+}
+
+static inline long calc_load_fold_idle(void)
+{
+        return 0;
+}
+#endif
+
 /**
  * get_avenrun - get the load average array
  * @loads: pointer to dest load array
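The hunk above introduces the mechanism the rest of the patch builds on: calc_load_fold_active() reports only the change in a runqueue's nr_running + nr_uninterruptible count, and under CONFIG_NO_HZ a CPU going idle parks that delta in calc_load_tasks_idle instead of the global sum, so the next LOAD_FREQ update can drain it with a single atomic exchange. Below is a minimal user-space sketch of that fold/xchg pattern using C11 atomics in place of the kernel's atomic_long_t; struct fake_rq and the function names are invented purely for illustration.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for calc_load_tasks_idle: deltas parked by CPUs going idle. */
static atomic_long tasks_idle;

/* Invented mini run-queue; only the fields the fold needs. */
struct fake_rq {
        long nr_running;
        long nr_uninterruptible;
        long calc_load_active;          /* last value this CPU reported */
};

/* Same shape as calc_load_fold_active(): return only the change. */
static long fold_active(struct fake_rq *rq)
{
        long nr_active = rq->nr_running + rq->nr_uninterruptible;
        long delta = 0;

        if (nr_active != rq->calc_load_active) {
                delta = nr_active - rq->calc_load_active;
                rq->calc_load_active = nr_active;
        }
        return delta;
}

int main(void)
{
        struct fake_rq rq = { .nr_running = 2 };

        /* CPU goes idle: stash the delta instead of touching the global count. */
        atomic_fetch_add(&tasks_idle, fold_active(&rq));

        /* Next LOAD_FREQ tick on some CPU: drain the parked deltas in one xchg. */
        long delta = atomic_exchange(&tasks_idle, 0);
        printf("idle-folded delta: %ld\n", delta);      /* prints 2 */
        return 0;
}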
@@ -2996,20 +3051,22 @@ void calc_global_load(void)
 }
 
 /*
- * Either called from update_cpu_load() or from a cpu going idle
+ * Called from update_cpu_load() to periodically update this CPU's
+ * active count.
  */
 static void calc_load_account_active(struct rq *this_rq)
 {
-        long nr_active, delta;
+        long delta;
 
-        nr_active = this_rq->nr_running;
-        nr_active += (long) this_rq->nr_uninterruptible;
+        if (time_before(jiffies, this_rq->calc_load_update))
+                return;
 
-        if (nr_active != this_rq->calc_load_active) {
-                delta = nr_active - this_rq->calc_load_active;
-                this_rq->calc_load_active = nr_active;
+        delta = calc_load_fold_active(this_rq);
+        delta += calc_load_fold_idle();
+        if (delta)
                 atomic_long_add(delta, &calc_load_tasks);
-        }
+
+        this_rq->calc_load_update += LOAD_FREQ;
 }
 
 /*
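With this hunk, calc_load_account_active() carries its own rate limit: it returns early via time_before() until the per-CPU calc_load_update deadline has passed, then advances the deadline by a fixed LOAD_FREQ stride so successive sampling windows stay aligned instead of drifting with whenever the update happens to run. A small user-space sketch of that windowing follows, with plain counters standing in for jiffies and calc_load_update; WINDOW_TICKS and the names are assumptions made for illustration.

#include <stdbool.h>
#include <stdio.h>

#define WINDOW_TICKS 10UL       /* stand-in for LOAD_FREQ */

/* Mirrors the check/advance pair in calc_load_account_active().
 * Wraparound is ignored here; the kernel's time_before() handles it. */
static bool window_elapsed(unsigned long now, unsigned long *deadline)
{
        if (now < *deadline)            /* time_before(jiffies, calc_load_update) */
                return false;
        *deadline += WINDOW_TICKS;      /* fixed stride, like += LOAD_FREQ */
        return true;
}

int main(void)
{
        unsigned long deadline = 10;

        /* Only ticks 10, 20 and 30 trigger an update; the rest bail out early. */
        for (unsigned long tick = 0; tick <= 35; tick++)
                if (window_elapsed(tick, &deadline))
                        printf("update at tick %lu, next deadline %lu\n",
                               tick, deadline);
        return 0;
}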
@@ -3041,10 +3098,7 @@ static void update_cpu_load(struct rq *this_rq)
                 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
         }
 
-        if (time_after_eq(jiffies, this_rq->calc_load_update)) {
-                this_rq->calc_load_update += LOAD_FREQ;
-                calc_load_account_active(this_rq);
-        }
+        calc_load_account_active(this_rq);
 }
 
 #ifdef CONFIG_SMP
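The per-CPU deltas accounted above all flow into calc_load_tasks, which calc_global_load() (named in the hunk header earlier) turns into the avenrun[] array using fixed-point exponential averaging. The sketch below reproduces that averaging step in user space; the FSHIFT/EXP_* constants and the shape of the calc_load() helper follow the values commonly defined in include/linux/sched.h and should be treated as assumptions of this sketch rather than part of the patch.

#include <stdio.h>

/* Fixed-point load-average constants, assumed here for illustration. */
#define FSHIFT   11
#define FIXED_1  (1UL << FSHIFT)
#define EXP_1    1884            /* 1/exp(5sec/1min) in fixed point */
#define EXP_5    2014            /* 1/exp(5sec/5min) */
#define EXP_15   2037            /* 1/exp(5sec/15min) */

/* One decay step per LOAD_FREQ interval, same shape as the kernel helper. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avenrun[3] = { 0, 0, 0 };
        unsigned long active = 2UL << FSHIFT;   /* pretend 2 tasks stay active */

        /* Simulate a minute's worth of 5-second LOAD_FREQ updates. */
        for (int i = 0; i < 12; i++) {
                avenrun[0] = calc_load(avenrun[0], EXP_1, active);
                avenrun[1] = calc_load(avenrun[1], EXP_5, active);
                avenrun[2] = calc_load(avenrun[2], EXP_15, active);
        }

        printf("loadavg %.2f %.2f %.2f\n",
               avenrun[0] / (double)FIXED_1,
               avenrun[1] / (double)FIXED_1,
               avenrun[2] / (double)FIXED_1);
        return 0;
}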
3 changes: 1 addition & 2 deletions trunk/kernel/sched_idletask.c
@@ -23,8 +23,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
         schedstat_inc(rq, sched_goidle);
-        /* adjust the active tasks as we might go into a long sleep */
-        calc_load_account_active(rq);
+        calc_load_account_idle(rq);
         return rq->idle;
 }
 
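The quantity this accounting ultimately feeds is the load average exported through /proc/loadavg. Below is a trivial reader for sanity-checking those values on a running kernel; it is purely illustrative and not part of the patch.

#include <stdio.h>

int main(void)
{
        double one, five, fifteen;
        FILE *f = fopen("/proc/loadavg", "r");

        if (!f) {
                perror("/proc/loadavg");
                return 1;
        }
        if (fscanf(f, "%lf %lf %lf", &one, &five, &fifteen) != 3) {
                fprintf(stderr, "unexpected /proc/loadavg format\n");
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("1min %.2f  5min %.2f  15min %.2f\n", one, five, fifteen);
        return 0;
}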
