sched: Extend scheduler's asym packing
We generalize the scheduler's asym packing to provide an ordering
of the CPUs beyond just the CPU number. This allows the
ASYM_PACKING scheduler machinery to move load to the preferred
CPUs in a sched domain. The preference is defined by the CPU
priority returned by arch_asym_cpu_priority(cpu).

We also record the most preferred CPU in a sched group when we
build the sched group's capacity, for fast lookup of the preferred
CPU during load balancing.
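
As an illustration (not part of this diff), an architecture could
override the __weak default added below with a strong definition that
reports its own CPU ordering. A minimal sketch, in which asym_prio[]
and example_set_cpu_priority() are hypothetical names:

#include <linux/sched.h>

/* Hypothetical per-CPU priority table; a higher value is preferred. */
static int asym_prio[NR_CPUS];

/* Strong definition; takes precedence over the __weak default in fair.c. */
int arch_asym_cpu_priority(int cpu)
{
        return asym_prio[cpu];
}

/* Hypothetical setter, e.g. called when firmware reports core speeds. */
void example_set_cpu_priority(int cpu, int prio)
{
        asym_prio[cpu] = prio;
}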

Co-developed-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-pm@vger.kernel.org
Cc: jolsa@redhat.com
Cc: rjw@rjwysocki.net
Cc: linux-acpi@vger.kernel.org
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: bp@suse.de
Link: http://lkml.kernel.org/r/0e73ae12737dfaafa46c07066cc7c5d3f1675e46.1479844244.git.tim.c.chen@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tim Chen authored and Thomas Gleixner committed Nov 24, 2016
1 parent 2b4d5b2 commit afe06ef
Showing 4 changed files with 59 additions and 17 deletions.
2 changes: 2 additions & 0 deletions include/linux/sched.h
@@ -1077,6 +1077,8 @@ static inline int cpu_numa_flags(void)
 }
 #endif
 
+extern int arch_asym_cpu_priority(int cpu);
+
 struct sched_domain_attr {
         int relax_domain_level;
 };
15 changes: 15 additions & 0 deletions kernel/sched/core.c
@@ -6303,7 +6303,22 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
         WARN_ON(!sg);
 
         do {
+                int cpu, max_cpu = -1;
+
                 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+
+                if (!(sd->flags & SD_ASYM_PACKING))
+                        goto next;
+
+                for_each_cpu(cpu, sched_group_cpus(sg)) {
+                        if (max_cpu < 0)
+                                max_cpu = cpu;
+                        else if (sched_asym_prefer(cpu, max_cpu))
+                                max_cpu = cpu;
+                }
+                sg->asym_prefer_cpu = max_cpu;
+
+next:
                 sg = sg->next;
         } while (sg != sd->groups);
 
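The loop above caches, per sched group, the CPU that wins every
sched_asym_prefer() comparison. A standalone user-space sketch of the
same selection, with a made-up priority table standing in for
arch_asym_cpu_priority():

#include <stdio.h>

/* Hypothetical priorities for a 4-CPU group; cpu 1 is most preferred. */
static int prio[4] = { 1, 3, 2, 0 };

static int asym_prefer(int a, int b)
{
        return prio[a] > prio[b];
}

int main(void)
{
        int cpu, max_cpu = -1;

        for (cpu = 0; cpu < 4; cpu++)   /* stand-in for for_each_cpu() */
                if (max_cpu < 0 || asym_prefer(cpu, max_cpu))
                        max_cpu = cpu;

        printf("asym_prefer_cpu = %d\n", max_cpu);      /* prints 1 */
        return 0;
}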
53 changes: 36 additions & 17 deletions kernel/sched/fair.c
@@ -97,6 +97,16 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+#ifdef CONFIG_SMP
+/*
+ * For asym packing, by default the lower numbered cpu has higher priority.
+ */
+int __weak arch_asym_cpu_priority(int cpu)
+{
+        return -cpu;
+}
+#endif
+
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
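
With this default, arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b)
reduces to -a > -b, i.e. a < b, so architectures that do not override
the hook keep the historical "lower CPU number wins" ASYM_PACKING
behaviour. A quick user-space check of that equivalence (a sketch, not
kernel code):

#include <assert.h>

static int arch_asym_cpu_priority(int cpu)
{
        return -cpu;            /* the __weak default above */
}

static int sched_asym_prefer(int a, int b)
{
        return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

int main(void)
{
        for (int a = 0; a < 8; a++)
                for (int b = 0; b < 8; b++)
                        assert(sched_asym_prefer(a, b) == (a < b));
        return 0;
}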
@@ -7388,16 +7398,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
         if (env->idle == CPU_NOT_IDLE)
                 return true;
         /*
-         * ASYM_PACKING needs to move all the work to the lowest
-         * numbered CPUs in the group, therefore mark all groups
-         * higher than ourself as busy.
+         * ASYM_PACKING needs to move all the work to the highest
+         * priority CPUs in the group, therefore mark all groups
+         * of lower priority than ourself as busy.
          */
-        if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
+        if (sgs->sum_nr_running &&
+            sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
                 if (!sds->busiest)
                         return true;
 
-                /* Prefer to move from highest possible cpu's work */
-                if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
+                /* Prefer to move from lowest priority cpu's work */
+                if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
+                                      sg->asym_prefer_cpu))
                         return true;
         }
 
@@ -7549,8 +7561,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
         if (!sds->busiest)
                 return 0;
 
-        busiest_cpu = group_first_cpu(sds->busiest);
-        if (env->dst_cpu > busiest_cpu)
+        busiest_cpu = sds->busiest->asym_prefer_cpu;
+        if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
                 return 0;
 
         env->imbalance = DIV_ROUND_CLOSEST(
@@ -7888,10 +7900,11 @@ static int need_active_balance(struct lb_env *env)
 
         /*
          * ASYM_PACKING needs to force migrate tasks from busy but
-         * higher numbered CPUs in order to pack all tasks in the
-         * lowest numbered CPUs.
+         * lower priority CPUs in order to pack all tasks in the
+         * highest priority CPUs.
          */
-        if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
+        if ((sd->flags & SD_ASYM_PACKING) &&
+            sched_asym_prefer(env->dst_cpu, env->src_cpu))
                 return 1;
         }
 
@@ -8740,7 +8753,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
         unsigned long now = jiffies;
         struct sched_domain_shared *sds;
         struct sched_domain *sd;
-        int nr_busy, cpu = rq->cpu;
+        int nr_busy, i, cpu = rq->cpu;
         bool kick = false;
 
         if (unlikely(rq->idle_balance))
@@ -8791,12 +8804,18 @@ static inline bool nohz_kick_needed(struct rq *rq)
         }
 
         sd = rcu_dereference(per_cpu(sd_asym, cpu));
-        if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
-                                     sched_domain_span(sd)) < cpu)) {
-                kick = true;
-                goto unlock;
-        }
+        if (sd) {
+                for_each_cpu(i, sched_domain_span(sd)) {
+                        if (i == cpu ||
+                            !cpumask_test_cpu(i, nohz.idle_cpus_mask))
+                                continue;
+
+                        if (sched_asym_prefer(i, cpu)) {
+                                kick = true;
+                                goto unlock;
+                        }
+                }
+        }
 unlock:
         rcu_read_unlock();
         return kick;
6 changes: 6 additions & 0 deletions kernel/sched/sched.h
@@ -540,6 +540,11 @@ struct dl_rq {
 
 #ifdef CONFIG_SMP
 
+static inline bool sched_asym_prefer(int a, int b)
+{
+        return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
+}
+
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
@@ -908,6 +913,7 @@ struct sched_group {
 
         unsigned int group_weight;
         struct sched_group_capacity *sgc;
+        int asym_prefer_cpu;            /* cpu of highest priority in group */
 
         /*
          * The CPUs this group covers.
