sched: optimize ttwu vs group scheduling
Impact: micro-optimization

We can avoid the sched domain walk on try_to_wake_up() when we know
there are no groups.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1236603381.8389.455.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Mar 10, 2009
1 parent 8c54436 commit 57310a9
Showing 1 changed file with 15 additions and 1 deletion: kernel/sched.c
@@ -331,6 +331,13 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_SMP
+static int root_task_group_empty(void)
+{
+	return list_empty(&root_task_group.children);
+}
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
@@ -391,6 +398,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #else
 
+#ifdef CONFIG_SMP
+static int root_task_group_empty(void)
+{
+	return 1;
+}
+#endif
+
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -2318,7 +2332,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		sync = 0;
 
 #ifdef CONFIG_SMP
-	if (sched_feat(LB_WAKEUP_UPDATE)) {
+	if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
 		struct sched_domain *sd;
 
 		this_cpu = raw_smp_processor_id();
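
For illustration, here is a minimal userspace sketch of the pattern this commit adds. The list helpers below are simplified stand-ins for the kernel's list.h, and main() is only a harness; the point is that the wakeup path consults root_task_group_empty() and skips the expensive sched-domain walk entirely when the root task group has no children.

/*
 * Minimal userspace sketch (not kernel code) of the fast path added
 * by this commit: check for child task groups before paying for the
 * sched-domain walk on wakeup.
 */
#include <stdio.h>

/* Simplified stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct task_group {
	struct list_head children;	/* child groups, if any */
};

static struct task_group root_task_group;

/* Mirrors the helper the commit adds for the group-scheduling case. */
static int root_task_group_empty(void)
{
	return list_empty(&root_task_group.children);
}

int main(void)
{
	INIT_LIST_HEAD(&root_task_group.children);

	/*
	 * Corresponds to the guarded branch in try_to_wake_up():
	 * with no child groups there are no group shares to update,
	 * so the walk over sched domains can be skipped outright.
	 */
	if (!root_task_group_empty())
		printf("would walk sched domains to update shares\n");
	else
		printf("no groups: skip the sched-domain walk\n");

	return 0;
}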
