Commit
---
r: 145714
b: refs/heads/master
c: dce48a8
h: refs/heads/master
v: v3
Thomas Gleixner committed May 15, 2009
1 parent 6a09406 commit aa87f4f
Showing 6 changed files with 81 additions and 66 deletions.
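In short: the load-average bookkeeping moves out of the timer tick. The old code walked every online CPU from the tick, via nr_active() under xtime_lock; after this commit each runqueue folds only the change in its nr_running + nr_uninterruptible into a shared counter (calc_load_tasks), and calc_global_load() turns that counter into the avenrun[] averages once per LOAD_FREQ (5 second) interval. For readers unfamiliar with the fixed-point averaging, here is a stand-alone sketch of the decay that calc_global_load() applies; the constants mirror include/linux/sched.h (FSHIFT = 11, FIXED_1 = 1 << 11, EXP_1/EXP_5/EXP_15 = 1884/2014/2037), while the demo loop and printing are illustrative only:

#include <stdio.h>

#define FSHIFT   11                     /* bits of precision */
#define FIXED_1  (1UL << FSHIFT)        /* 1.0 in fixed point */
#define EXP_1    1884                   /* 1/exp(5sec/1min) in fixed point */
#define EXP_5    2014                   /* 1/exp(5sec/5min) */
#define EXP_15   2037                   /* 1/exp(5sec/15min) */

/* Same arithmetic as the new kernel/sched.c helper of the same name. */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long avenrun0 = 0;             /* 1-minute average, fixed point */
	unsigned long active = 1 * FIXED_1;     /* one task runnable the whole time */
	int i;

	for (i = 1; i <= 12; i++) {             /* 12 x 5s updates = one minute */
		avenrun0 = calc_load(avenrun0, EXP_1, active);
		printf("after %3ds: %lu.%02lu\n", i * 5, avenrun0 >> FSHIFT,
		       ((avenrun0 & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;                               /* climbs toward 1.00, never overshoots */
}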
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 2ff799d3cff1ecb274049378b28120ee5c1c5e5f
+refs/heads/master: dce48a84adf1806676319f6f480e30a6daa012f9
2 changes: 1 addition & 1 deletion trunk/include/linux/sched.h
@@ -135,8 +135,8 @@ DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);

extern unsigned long get_parent_ip(unsigned long addr);

84 changes: 74 additions & 10 deletions trunk/kernel/sched.c
@@ -630,6 +630,10 @@ struct rq {
struct list_head migration_queue;
#endif

+/* calc_load related fields */
+unsigned long calc_load_update;
+long calc_load_active;
+
#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
int hrtick_csd_pending;
@@ -1728,6 +1732,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
}
#endif

+static void calc_load_account_active(struct rq *this_rq);
+
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
@@ -2856,19 +2862,57 @@ unsigned long nr_iowait(void)
return sum;
}

-unsigned long nr_active(void)
+/* Variables and functions for calc_load */
+static atomic_long_t calc_load_tasks;
+static unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun);
+
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
-unsigned long i, running = 0, uninterruptible = 0;
+load *= exp;
+load += active * (FIXED_1 - exp);
+return load >> FSHIFT;
+}

-for_each_online_cpu(i) {
-running += cpu_rq(i)->nr_running;
-uninterruptible += cpu_rq(i)->nr_uninterruptible;
-}
+/*
+ * calc_load - update the avenrun load estimates 10 ticks after the
+ * CPUs have updated calc_load_tasks.
+ */
+void calc_global_load(void)
+{
+unsigned long upd = calc_load_update + 10;
+long active;
+
+if (time_before(jiffies, upd))
+return;

-if (unlikely((long)uninterruptible < 0))
-uninterruptible = 0;
+active = atomic_long_read(&calc_load_tasks);
+active = active > 0 ? active * FIXED_1 : 0;

-return running + uninterruptible;
+avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+calc_load_update += LOAD_FREQ;
}
+
+/*
+ * Either called from update_cpu_load() or from a cpu going idle
+ */
+static void calc_load_account_active(struct rq *this_rq)
+{
+long nr_active, delta;
+
+nr_active = this_rq->nr_running;
+nr_active += (long) this_rq->nr_uninterruptible;
+
+if (nr_active != this_rq->calc_load_active) {
+delta = nr_active - this_rq->calc_load_active;
+this_rq->calc_load_active = nr_active;
+atomic_long_add(delta, &calc_load_tasks);
+}
+}

/*
@@ -2899,6 +2943,11 @@ static void update_cpu_load(struct rq *this_rq)
new_load += scale-1;
this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
}
+
+if (time_after_eq(jiffies, this_rq->calc_load_update)) {
+this_rq->calc_load_update += LOAD_FREQ;
+calc_load_account_active(this_rq);
+}
}

#ifdef CONFIG_SMP
@@ -7091,6 +7140,14 @@ static void migrate_dead_tasks(unsigned int dead_cpu)

}
}
+
+/*
+ * remove the tasks which were accounted by rq from calc_load_tasks.
+ */
+static void calc_global_load_remove(struct rq *rq)
+{
+atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+}
#endif /* CONFIG_HOTPLUG_CPU */

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -7325,6 +7382,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
+rq->calc_load_update = calc_load_update;
+rq->calc_load_active = 0;
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));

@@ -7364,7 +7423,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
-
+calc_global_load_remove(rq);
/*
* No need to migrate the tasks: it was best-effort if
* they didn't take sched_hotcpu_mutex. Just wake up
@@ -9059,6 +9118,8 @@ void __init sched_init(void)
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
rq->nr_running = 0;
+rq->calc_load_active = 0;
+rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs, rq);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -9166,6 +9227,9 @@ void __init sched_init(void)
* when this runqueue becomes "idle".
*/
init_idle(current, smp_processor_id());
+
+calc_load_update = jiffies + LOAD_FREQ;
+
/*
* During early bootup we pretend to be a normal task:
*/
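Taken together, the kernel/sched.c changes split the work in two: each CPU accounts only the delta of its own active task count into calc_load_tasks (from update_cpu_load() or when it switches to the idle task), and a single calc_global_load() call per LOAD_FREQ interval folds that counter into avenrun[]. Below is a minimal user-space model of the same flow, useful for following the control flow above; the toy struct rq, the two-CPU scenario and the single avenrun slot are illustrative, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

#define FSHIFT   11
#define FIXED_1  (1L << FSHIFT)
#define EXP_1    1884

static atomic_long calc_load_tasks;     /* shared: total active tasks over all CPUs */
static long avenrun0;                   /* 1-minute average, fixed point */

struct rq {                             /* toy per-CPU runqueue */
	long nr_running;
	long nr_uninterruptible;
	long calc_load_active;          /* what this CPU last folded in */
};

/* Per-CPU side: fold only the change since the last fold into the counter. */
static void calc_load_account_active(struct rq *rq)
{
	long nr_active = rq->nr_running + rq->nr_uninterruptible;

	if (nr_active != rq->calc_load_active) {
		atomic_fetch_add(&calc_load_tasks, nr_active - rq->calc_load_active);
		rq->calc_load_active = nr_active;
	}
}

/* Global side: one reader decays the single counter into the average. */
static void calc_global_load(void)
{
	long active = atomic_load(&calc_load_tasks);

	active = active > 0 ? active * FIXED_1 : 0;
	avenrun0 = (avenrun0 * EXP_1 + active * (FIXED_1 - EXP_1)) >> FSHIFT;
}

int main(void)
{
	struct rq cpu0 = { .nr_running = 2 };
	struct rq cpu1 = { .nr_uninterruptible = 1 };

	calc_load_account_active(&cpu0);    /* folds +2 into calc_load_tasks */
	calc_load_account_active(&cpu1);    /* folds +1 */
	calc_global_load();                 /* one global update per LOAD_FREQ */

	printf("tasks=%ld avenrun[0]=%ld.%02ld\n",
	       atomic_load(&calc_load_tasks),
	       avenrun0 >> FSHIFT, ((avenrun0 & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}

The delta scheme means a runqueue whose active count has not changed since its last fold does not touch the shared cache line at all, which is why calc_load_active is kept per runqueue.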
3 changes: 2 additions & 1 deletion trunk/kernel/sched_idletask.c
@@ -22,7 +22,8 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sy
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
schedstat_inc(rq, sched_goidle);
-
+/* adjust the active tasks as we might go into a long sleep */
+calc_load_account_active(rq);
return rq->idle;
}

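The new call in pick_next_task_idle() matters mostly for tickless (NO_HZ) configurations: once a CPU parks in the idle task it may stop taking timer ticks, so update_cpu_load() will not run on it again for a long time. Folding the count on the way into idle removes that CPU's contribution first, and is a no-op if nothing changed. A toy, non-kernel illustration of what the fold prevents (names reuse the kernel's for readability; the plain long stands in for the atomic counter):

#include <stdio.h>

static long calc_load_tasks;            /* global count, atomic in the kernel */

struct rq {
	long nr_running;
	long nr_uninterruptible;
	long calc_load_active;
};

static void calc_load_account_active(struct rq *rq)
{
	long nr_active = rq->nr_running + rq->nr_uninterruptible;

	if (nr_active != rq->calc_load_active) {
		calc_load_tasks += nr_active - rq->calc_load_active;
		rq->calc_load_active = nr_active;
	}
}

int main(void)
{
	struct rq cpu = { .nr_running = 3, .calc_load_active = 3 };

	calc_load_tasks = 3;                /* this CPU's three tasks are counted */

	cpu.nr_running = 0;                 /* they all went to sleep */
	calc_load_account_active(&cpu);     /* fold before picking the idle task */

	/* Without this fold, a tickless CPU would stop running update_cpu_load()
	 * and its stale +3 would keep inflating the load average while it sleeps. */
	printf("calc_load_tasks while idle: %ld\n", calc_load_tasks);   /* prints 0 */
	return 0;
}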
2 changes: 1 addition & 1 deletion trunk/kernel/time/timekeeping.c
@@ -22,7 +22,7 @@

/*
* This read-write spinlock protects us from races in SMP while
- * playing with xtime and avenrun.
+ * playing with xtime.
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

54 changes: 2 additions & 52 deletions trunk/kernel/timer.c
@@ -1122,47 +1122,6 @@ void update_process_times(int user_tick)
run_posix_cpu_timers(p);
}

-/*
- * Nr of active tasks - counted in fixed-point numbers
- */
-static unsigned long count_active_tasks(void)
-{
-return nr_active() * FIXED_1;
-}
-
-/*
- * Hmm.. Changed this, as the GNU make sources (load.c) seems to
- * imply that avenrun[] is the standard name for this kind of thing.
- * Nothing else seems to be standardized: the fractional size etc
- * all seem to differ on different machines.
- *
- * Requires xtime_lock to access.
- */
-unsigned long avenrun[3];
-
-EXPORT_SYMBOL(avenrun);
-
-/*
- * calc_load - given tick count, update the avenrun load estimates.
- * This is called while holding a write_lock on xtime_lock.
- */
-static inline void calc_load(unsigned long ticks)
-{
-unsigned long active_tasks; /* fixed-point */
-static int count = LOAD_FREQ;
-
-count -= ticks;
-if (unlikely(count < 0)) {
-active_tasks = count_active_tasks();
-do {
-CALC_LOAD(avenrun[0], EXP_1, active_tasks);
-CALC_LOAD(avenrun[1], EXP_5, active_tasks);
-CALC_LOAD(avenrun[2], EXP_15, active_tasks);
-count += LOAD_FREQ;
-} while (count < 0);
-}
-}
-
/*
* This function runs timers and the timer-tq in bottom half context.
*/
@@ -1186,16 +1145,6 @@ void run_local_timers(void)
softlockup_tick();
}

-/*
- * Called by the timer interrupt. xtime_lock must already be taken
- * by the timer IRQ!
- */
-static inline void update_times(unsigned long ticks)
-{
-update_wall_time();
-calc_load(ticks);
-}
-
/*
* The 64-bit jiffies value is not atomic - you MUST NOT read it
* without sampling the sequence number in xtime_lock.
@@ -1205,7 +1154,8 @@ static inline void update_times(unsigned long ticks)
void do_timer(unsigned long ticks)
{
jiffies_64 += ticks;
-update_times(ticks);
+update_wall_time();
+calc_global_load();
}

#ifdef __ARCH_WANT_SYS_ALARM
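After the change, do_timer() only advances jiffies, updates the wall clock and calls calc_global_load(); user space keeps reading the result through /proc/loadavg, which formats avenrun[] with the LOAD_INT/LOAD_FRAC macros. A small sketch of that presentation step (the macros follow include/linux/sched.h; the sample values and the 1/200 rounding term are shown for illustration):

#include <stdio.h>

#define FSHIFT   11
#define FIXED_1  (1UL << FSHIFT)
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	/* example fixed-point averages, e.g. a mostly idle box */
	unsigned long avenrun[3] = { 164, 34, 11 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long v = avenrun[i] + FIXED_1 / 200;   /* round to nearest 1/100 */
		printf("%lu.%02lu ", LOAD_INT(v), LOAD_FRAC(v));
	}
	printf("\n");           /* prints "0.08 0.02 0.01 " style output */
	return 0;
}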
