
sched: Remove irq time from available CPU power
The idea was suggested by Peter Zijlstra here:

  http://marc.info/?l=linux-kernel&m=127476934517534&w=2

irq time is technically not available to the tasks running on the CPU.
This patch removes irq time from CPU power, piggybacking on
sched_rt_avg_update().
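
The gist, as a minimal user-space sketch (plain C with simplified
stand-in types, not the kernel code itself): each clock update feeds
only the irq time accrued since the previous update into the same
decaying average that already tracks RT time, so the load balancer's
power calculation discounts it automatically.

#include <stdint.h>
#include <stdio.h>

struct rq {
	uint64_t rt_avg;        /* decaying sum of time not available to tasks */
	uint64_t prev_irq_time; /* cumulative irq time seen at the last update */
};

/* Stand-in for the kernel accumulator; the real one also decays rt_avg. */
static void sched_rt_avg_update(struct rq *rq, uint64_t delta)
{
	rq->rt_avg += delta;
}

/* Mirrors sched_irq_time_avg_update() from the patch below. */
static void sched_irq_time_avg_update(struct rq *rq, uint64_t curr_irq_time)
{
	uint64_t delta_irq = curr_irq_time - rq->prev_irq_time;

	rq->prev_irq_time = curr_irq_time;
	sched_rt_avg_update(rq, delta_irq);
}

int main(void)
{
	struct rq rq = { 0, 0 };

	sched_irq_time_avg_update(&rq, 300); /* 300 units of irq time so far */
	sched_irq_time_avg_update(&rq, 750); /* 450 more since the last call */
	printf("rt_avg = %llu\n", (unsigned long long)rq.rt_avg); /* 750 */
	return 0;
}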

Tested this by keeping CPU X busy with a network-intensive task whose
irq processing (hard+soft) consumes 75% of a single CPU, on a 4-way
system, and then starting seven cycle soakers. Without this change,
there are two tasks on each CPU. With this change, there is a single
task on the irq-busy CPU X and the remaining seven tasks are spread
among the other 3 CPUs.
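
(Rough arithmetic, assuming the SCHED_LOAD_SCALE of 1024 used by
kernels of this era: with irq work consuming ~75% of CPU X,
available/total in scale_rt_power() is about 0.25, so CPU X advertises
roughly 256 of 1024 units of power; the other three CPUs advertise the
full 1024, so the balancer packs the seven soakers onto them.)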

Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-8-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Venkatesh Pallipadi authored and Ingo Molnar committed Oct 18, 2010
1 parent 305e683 commit aa48380
Showing 3 changed files with 30 additions and 1 deletion.
18 changes: 18 additions & 0 deletions kernel/sched.c
@@ -519,6 +519,10 @@ struct rq {
 	u64 avg_idle;
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	u64 prev_irq_time;
+#endif
+
 	/* calc_load related fields */
 	unsigned long calc_load_update;
 	long calc_load_active;
@@ -643,6 +647,7 @@ static inline struct task_group *task_group(struct task_struct *p)
 #endif /* CONFIG_CGROUP_SCHED */
 
 static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
 inline void update_rq_clock(struct rq *rq)
 {
@@ -654,6 +659,8 @@ inline void update_rq_clock(struct rq *rq)
 		irq_time = irq_time_cpu(cpu);
 		if (rq->clock - irq_time > rq->clock_task)
 			rq->clock_task = rq->clock - irq_time;
+
+		sched_irq_time_avg_update(rq, irq_time);
 	}
 }
 
@@ -1985,13 +1992,24 @@ void account_system_vtime(struct task_struct *curr)
 	local_irq_restore(flags);
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+	if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+		u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+		rq->prev_irq_time = curr_irq_time;
+		sched_rt_avg_update(rq, delta_irq);
+	}
+}
+
 #else
 
 static u64 irq_time_cpu(int cpu)
 {
 	return 0;
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
 #endif
 
 #include "sched_idletask.c"
8 changes: 7 additions & 1 deletion kernel/sched_fair.c
@@ -2275,7 +2275,13 @@ unsigned long scale_rt_power(int cpu)
 	u64 total, available;
 
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
-	available = total - rq->rt_avg;
+
+	if (unlikely(total < rq->rt_avg)) {
+		/* Ensures that power won't end up being negative */
+		available = 0;
+	} else {
+		available = total - rq->rt_avg;
+	}
 
 	if (unlikely((s64)total < SCHED_LOAD_SCALE))
 		total = SCHED_LOAD_SCALE;
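Note on the clamp added above: total and rt_avg are u64, so without it
"total - rq->rt_avg" would not go negative when rt_avg transiently
exceeds the averaging window — it would wrap around to an enormous
value and wildly inflate the computed power. A standalone illustration
in plain C, with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total  = 1000;	/* hypothetical averaging window */
	uint64_t rt_avg = 1500;	/* irq + rt time can briefly exceed it */

	/* Unsigned subtraction wraps instead of going negative. */
	printf("unclamped: %llu\n", (unsigned long long)(total - rt_avg));

	/* The patch pins available time to zero in that case. */
	uint64_t available = (total < rt_avg) ? 0 : total - rt_avg;
	printf("clamped:   %llu\n", (unsigned long long)available);
	return 0;
}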
5 changes: 5 additions & 0 deletions kernel/sched_features.h
@@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1)
  * release the lock. Decreases scheduling overhead.
  */
 SCHED_FEAT(OWNER_SPIN, 1)
+
+/*
+ * Decrement CPU power based on irq activity
+ */
+SCHED_FEAT(NONIRQ_POWER, 1)
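
For what it's worth: with CONFIG_SCHED_DEBUG enabled, scheduler
features declared this way can typically be flipped at runtime through
/sys/kernel/debug/sched_features (writing NO_NONIRQ_POWER there would
disable this behavior); the "1" above means it defaults to on.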
