Commit 330d11d
---
r: 69037
b: refs/heads/master
c: bbdba7c
h: refs/heads/master
i:
  69035: 39809f0
v: v3
Ingo Molnar committed Oct 15, 2007
1 parent 7c747fa commit 330d11d
Showing 5 changed files with 15 additions and 162 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e22f5bbf86d8cce710d5c8ba5bf57832e73aab8c
+refs/heads/master: bbdba7c0e1161934ae881ad00e4db49830f5ef59
9 changes: 0 additions & 9 deletions trunk/include/linux/sched.h
@@ -888,13 +888,9 @@ struct load_weight {
  * 4 se->block_start
  * 4 se->run_node
  * 4 se->sleep_start
- * 4 se->sleep_start_fair
  * 6 se->load.weight
- * 7 se->delta_fair
- * 15 se->wait_runtime
  */
 struct sched_entity {
-	long wait_runtime;
 	s64 fair_key;
 	struct load_weight load;	/* for load-balancing */
 	struct rb_node run_node;
@@ -904,12 +900,10 @@ struct sched_entity {
 	u64 sum_exec_runtime;
 	u64 vruntime;
 	u64 prev_sum_exec_runtime;
-	u64 wait_start_fair;
 
 #ifdef CONFIG_SCHEDSTATS
 	u64 wait_start;
 	u64 wait_max;
-	s64 sum_wait_runtime;
 
 	u64 sleep_start;
 	u64 sleep_max;
@@ -919,9 +913,6 @@ struct sched_entity {
 	u64 block_max;
 	u64 exec_max;
 	u64 slice_max;
-
-	unsigned long wait_runtime_overruns;
-	unsigned long wait_runtime_underruns;
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
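The fields that survive in sched_entity (run_node, load, vruntime) are what tie a task into the CFS timeline: run_node is the entity's hook into the per-runqueue rbtree, and code such as the debug dump further down recovers the owning task from that node with rb_entry(). Below is a minimal, kernel-independent sketch of that embedded-node pattern; the struct names and the simplified container_of are illustrative stand-ins, not the kernel's definitions:

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types; not the real definitions. */
struct rb_node {
	struct rb_node *left, *right;
};

struct sched_entity_like {
	long long vruntime;          /* key that orders the timeline */
	struct rb_node run_node;     /* hook into the rbtree */
};

struct task_like {
	int pid;
	struct sched_entity_like se; /* entity embedded in the task */
};

/*
 * container_of / rb_entry pattern: step back from a pointer to an
 * embedded member to the structure that contains it.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

int main(void)
{
	struct task_like t = { .pid = 42, .se = { .vruntime = 1000 } };
	struct rb_node *node = &t.se.run_node;

	/* Recover the owning task from its run_node, the same way the
	 * debug code does with rb_entry(curr, struct task_struct,
	 * se.run_node). */
	struct task_like *owner = rb_entry(node, struct task_like, se.run_node);

	printf("pid=%d vruntime=%lld\n", owner->pid, owner->se.vruntime);
	return 0;
}
```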
38 changes: 5 additions & 33 deletions trunk/kernel/sched.c
@@ -176,11 +176,8 @@ struct cfs_rq {
 	struct load_weight load;
 	unsigned long nr_running;
 
-	s64 fair_clock;
 	u64 exec_clock;
 	u64 min_vruntime;
-	s64 wait_runtime;
-	unsigned long wait_runtime_overruns, wait_runtime_underruns;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -389,20 +386,14 @@ static void update_rq_clock(struct rq *rq)
  * Debugging: various feature bits
  */
 enum {
-	SCHED_FEAT_FAIR_SLEEPERS	= 1,
-	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 2,
-	SCHED_FEAT_SLEEPER_AVG		= 4,
-	SCHED_FEAT_SLEEPER_LOAD_AVG	= 8,
-	SCHED_FEAT_START_DEBIT		= 16,
-	SCHED_FEAT_USE_TREE_AVG		= 32,
-	SCHED_FEAT_APPROX_AVG		= 64,
+	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
+	SCHED_FEAT_START_DEBIT		= 2,
+	SCHED_FEAT_USE_TREE_AVG		= 4,
+	SCHED_FEAT_APPROX_AVG		= 8,
 };
 
 const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_FAIR_SLEEPERS	*0 |
 		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_SLEEPER_AVG		*0 |
-		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_USE_TREE_AVG		*0 |
 		SCHED_FEAT_APPROX_AVG		*0;
@@ -716,15 +707,11 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
-	if (sched_feat(FAIR_SLEEPERS))
-		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
-	if (sched_feat(FAIR_SLEEPERS) && likely(lw->weight))
-		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 /*
@@ -848,8 +835,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	p->se.wait_runtime = 0;
-
 	if (task_has_rt_policy(p)) {
 		p->se.load.weight = prio_to_weight[0] * 2;
 		p->se.load.inv_weight = prio_to_wmult[0] >> 1;
@@ -995,13 +980,9 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 	int old_cpu = task_cpu(p);
 	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
-	u64 clock_offset, fair_clock_offset;
+	u64 clock_offset;
 
 	clock_offset = old_rq->clock - new_rq->clock;
-	fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
-
-	if (p->se.wait_start_fair)
-		p->se.wait_start_fair -= fair_clock_offset;
 
 #ifdef CONFIG_SCHEDSTATS
 	if (p->se.wait_start)
@@ -1571,15 +1552,12 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
-	p->se.wait_start_fair = 0;
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
-	p->se.wait_runtime = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start = 0;
-	p->se.sum_wait_runtime = 0;
 	p->se.sum_sleep_runtime = 0;
 	p->se.sleep_start = 0;
 	p->se.block_start = 0;
@@ -1588,8 +1566,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_max = 0;
 	p->se.slice_max = 0;
 	p->se.wait_max = 0;
-	p->se.wait_runtime_overruns = 0;
-	p->se.wait_runtime_underruns = 0;
 #endif
 
 	INIT_LIST_HEAD(&p->run_list);
@@ -6436,7 +6412,6 @@ int in_sched_functions(unsigned long addr)
 static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
-	cfs_rq->fair_clock = 1;
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
 #endif
@@ -6562,15 +6537,12 @@ void normalize_rt_tasks(void)
 	read_lock_irq(&tasklist_lock);
 	do_each_thread(g, p) {
 		p->se.fair_key = 0;
-		p->se.wait_runtime = 0;
 		p->se.exec_start = 0;
-		p->se.wait_start_fair = 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start = 0;
 		p->se.sleep_start = 0;
 		p->se.block_start = 0;
 #endif
-		task_rq(p)->cfs.fair_clock = 0;
 		task_rq(p)->clock = 0;
 
 		if (!rt_task(p)) {
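The surviving scheduler feature flags stay power-of-two values so that the default mask can be written as the flag-times-0-or-1 chain seen in the sysctl_sched_features initializer above, and so that a single bitwise AND can test one feature at run time (sched.c of this era wraps that test in a sched_feat() macro). A small self-contained sketch of the same pattern follows; the sched_feat() definition shown is assumed from that convention rather than quoted from this commit:

```c
#include <stdio.h>

/* Power-of-two feature bits, mirroring the reduced enum in sched.c. */
enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
	SCHED_FEAT_START_DEBIT		= 2,
	SCHED_FEAT_USE_TREE_AVG		= 4,
	SCHED_FEAT_APPROX_AVG		= 8,
};

/* Default mask: each "*1" enables a feature, each "*0" disables it,
 * following the shape of the sysctl_sched_features initializer. */
static unsigned int sysctl_sched_features =
		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
		SCHED_FEAT_START_DEBIT		* 1 |
		SCHED_FEAT_USE_TREE_AVG		* 0 |
		SCHED_FEAT_APPROX_AVG		* 0;

/* Assumed helper in the style of the kernel's sched_feat() macro:
 * token-paste the short name onto the SCHED_FEAT_ prefix and AND it
 * against the mask. */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("mask = 0x%x\n", sysctl_sched_features);        /* prints 0x3 */
	printf("START_DEBIT  enabled: %d\n", !!sched_feat(START_DEBIT));
	printf("USE_TREE_AVG enabled: %d\n", !!sched_feat(USE_TREE_AVG));
	return 0;
}
```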
54 changes: 5 additions & 49 deletions trunk/kernel/sched_debug.c
@@ -36,21 +36,16 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	else
 		SEQ_printf(m, " ");
 
-	SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d ",
+	SEQ_printf(m, "%15s %5d %15Ld %13Ld %5d ",
 		p->comm, p->pid,
-		(long long)p->se.fair_key,
-		(long long)(p->se.fair_key - rq->cfs.fair_clock),
-		(long long)p->se.wait_runtime,
 		(long long)(p->nvcsw + p->nivcsw),
 		p->prio);
 #ifdef CONFIG_SCHEDSTATS
-	SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld %15Ld\n",
+	SEQ_printf(m, "%15Ld %15Ld %15Ld\n",
 		(long long)p->se.vruntime,
 		(long long)p->se.sum_exec_runtime,
-		(long long)p->se.sum_wait_runtime,
-		(long long)p->se.sum_sleep_runtime,
-		(long long)p->se.wait_runtime_overruns,
-		(long long)p->se.wait_runtime_underruns);
+		(long long)p->se.sum_sleep_runtime);
 #else
 	SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
 		0LL, 0LL, 0LL, 0LL, 0LL);
@@ -63,10 +58,8 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 
 	SEQ_printf(m,
 	"\nrunnable tasks:\n"
-	" task PID tree-key delta waiting"
-	" switches prio"
-	" exec-runtime sum-exec sum-wait sum-sleep"
-	" wait-overrun wait-underrun\n"
+	" task PID tree-key switches prio"
+	" exec-runtime sum-exec sum-sleep\n"
 	"------------------------------------------------------------------"
 	"--------------------------------"
 	"------------------------------------------------"
@@ -84,29 +77,6 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_unlock_irq(&tasklist_lock);
 }
 
-static void
-print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
-	s64 wait_runtime_rq_sum = 0;
-	struct task_struct *p;
-	struct rb_node *curr;
-	unsigned long flags;
-	struct rq *rq = &per_cpu(runqueues, cpu);
-
-	spin_lock_irqsave(&rq->lock, flags);
-	curr = first_fair(cfs_rq);
-	while (curr) {
-		p = rb_entry(curr, struct task_struct, se.run_node);
-		wait_runtime_rq_sum += p->se.wait_runtime;
-
-		curr = rb_next(curr);
-	}
-	spin_unlock_irqrestore(&rq->lock, flags);
-
-	SEQ_printf(m, " .%-30s: %Ld\n", "wait_runtime_rq_sum",
-		(long long)wait_runtime_rq_sum);
-}
-
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -120,7 +90,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define P(x) \
 	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
 
-	P(fair_clock);
 	P(exec_clock);
 
 	spin_lock_irqsave(&rq->lock, flags);
@@ -144,13 +113,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	spread0 = min_vruntime - rq0_min_vruntime;
 	SEQ_printf(m, " .%-30s: %Ld\n", "spread0",
 			(long long)spread0);
-
-	P(wait_runtime);
-	P(wait_runtime_overruns);
-	P(wait_runtime_underruns);
 #undef P
-
-	print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
@@ -268,8 +231,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #define P(F) \
 	SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
 
-	P(se.wait_runtime);
-	P(se.wait_start_fair);
 	P(se.exec_start);
 	P(se.vruntime);
 	P(se.sum_exec_runtime);
@@ -283,9 +244,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.exec_max);
 	P(se.slice_max);
 	P(se.wait_max);
-	P(se.wait_runtime_overruns);
-	P(se.wait_runtime_underruns);
-	P(se.sum_wait_runtime);
 #endif
 	SEQ_printf(m, "%-25s:%20Ld\n",
 		"nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -312,8 +270,6 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.exec_max = 0;
 	p->se.slice_max = 0;
 	p->se.wait_max = 0;
-	p->se.wait_runtime_overruns = 0;
-	p->se.wait_runtime_underruns = 0;
 #endif
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
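Most of the deletions in sched_debug.c are one-liners because the file prints its statistics through a stringizing macro: P(field) expands to a printf of both the literal field name (via the preprocessor # operator) and its value, so dropping a statistic means dropping a single P(...) line. Below is a standalone sketch of that pattern, with plain printf standing in for SEQ_printf and an illustrative struct in place of the real sched_entity:

```c
#include <stdio.h>

/* Illustrative stand-in for a few sched_entity statistics. */
struct sched_entity_stats {
	long long vruntime;
	long long sum_exec_runtime;
	long long wait_max;
};

int main(void)
{
	struct sched_entity_stats se = {
		.vruntime = 123456,
		.sum_exec_runtime = 789000,
		.wait_max = 42,
	};

	/* Same shape as the P() macros in sched_debug.c: #F turns the
	 * argument into a string literal, so the printed name and the
	 * printed value can never drift apart. */
#define P(F) printf("%-25s:%20lld\n", #F, (long long)se.F)

	P(vruntime);
	P(sum_exec_runtime);
	P(wait_max);
#undef P

	return 0;
}
```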
