sched, cpuset: customize sched domains, core
[rebased for sched-devel/latest]

 - Add a new cpuset file that takes a relaxation level
   (a usage sketch follows the changed-file summary below):
     sched_relax_domain_level

 - Modify partition_sched_domains() and build_sched_domains()
   to take an attributes parameter passed from cpuset.

 - Fill in newidle_idx for node domains; it is currently unused but
   may be required if sched_relax_domain_level becomes higher.

 - The default level can be changed with the boot option 'relax_domain_level='.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Hidetoshi Seto authored and Ingo Molnar committed Apr 19, 2008
1 parent 4d5f355 commit 1d3504f
Showing 7 changed files with 161 additions and 11 deletions.
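The new knob is an ordinary cpuset control file, so it can be driven from userspace like any other cpuset attribute. Below is a minimal usage sketch, not part of this commit: the mount point /dev/cpuset and the cpuset name "mycpuset" are assumptions (cpusets may be mounted elsewhere), and the written value is interpreted against the enum sched_domain_level added to include/linux/sched.h further down, with -1 meaning "use the system default".

/* usage_sketch.c -- hypothetical userspace example, not part of this commit */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption; adjust to wherever cpusets are mounted. */
	const char *path = "/dev/cpuset/mycpuset/sched_relax_domain_level";
	const char *level = "2";	/* request a wider idle-balancing search */
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, level, strlen(level)) < 0)
		perror("write");
	close(fd);
	return 0;
}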
2 changes: 1 addition & 1 deletion include/asm-ia64/topology.h
@@ -93,7 +93,7 @@ void build_cpu_to_node_map(void);
 	.cache_nice_tries = 2, \
 	.busy_idx = 3, \
 	.idle_idx = 2, \
-	.newidle_idx = 0, /* unused */ \
+	.newidle_idx = 2, \
 	.wake_idx = 1, \
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
2 changes: 1 addition & 1 deletion include/asm-sh/topology.h
@@ -16,7 +16,7 @@
 	.cache_nice_tries = 2, \
 	.busy_idx = 3, \
 	.idle_idx = 2, \
-	.newidle_idx = 0, \
+	.newidle_idx = 2, \
 	.wake_idx = 1, \
 	.forkexec_idx = 1, \
 	.flags = SD_LOAD_BALANCE \
2 changes: 1 addition & 1 deletion include/asm-x86/topology.h
@@ -147,7 +147,7 @@ extern unsigned long node_remap_size[];

 # define SD_CACHE_NICE_TRIES 2
 # define SD_IDLE_IDX 2
-# define SD_NEWIDLE_IDX 0
+# define SD_NEWIDLE_IDX 2
 # define SD_FORKEXEC_IDX 1

 #endif
23 changes: 22 additions & 1 deletion include/linux/sched.h
@@ -704,6 +704,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
 #define SD_SERIALIZE 1024 /* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */

 #define BALANCE_FOR_MC_POWER \
 	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
@@ -733,6 +734,24 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };

+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+struct sched_domain_attr {
+	int relax_domain_level;
+};
+
+#define SD_ATTR_INIT (struct sched_domain_attr) { \
+	.relax_domain_level = -1, \
+}
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent; /* top domain must be null terminated */
@@ -750,6 +769,7 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	int flags; /* See SD_* */
+	enum sched_domain_level level;

 	/* Runtime fields. */
 	unsigned long last_balance; /* init to jiffies. units in jiffies */
@@ -789,7 +809,8 @@ struct sched_domain {
 #endif
 };

-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);

 #endif /* CONFIG_SMP */
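The scheduler-core half of this change (kernel/sched.c) is not expanded on this page, so what follows is only a rough sketch of how the new dattr_new argument would typically be consumed while domains are built: each domain compares the requested relax level against its new level field and switches idle balancing on or off accordingly. The helper name set_domain_attribute and the default_relax_domain_level fallback are assumptions to be checked against the actual kernel/sched.c hunks.

/*
 * Sketch only -- the real code lives in kernel/sched.c, which is not shown
 * above.  Applies one sched_domain_attr to one sched_domain at build time.
 */
static int default_relax_domain_level = -1;	/* -1: no boot-time request */

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;			/* nothing requested anywhere */
		request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;

	if (request < sd->level)
		/* domain is wider than requested: keep idle balancing off */
		sd->flags &= ~(SD_BALANCE_NEWIDLE | SD_WAKE_IDLE);
	else
		/* domain is within the requested range: balance when idle */
		sd->flags |= (SD_BALANCE_NEWIDLE | SD_WAKE_IDLE);
}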
61 changes: 60 additions & 1 deletion kernel/cpuset.c
@@ -98,6 +98,9 @@ struct cpuset {
 	/* partition number for rebuild_sched_domains() */
 	int pn;

+	/* for custom sched domain */
+	int relax_domain_level;
+
 	/* used for walking a cpuset heirarchy */
 	struct list_head stack_list;
 };
@@ -478,6 +481,16 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 	return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
 }

+static void
+update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+	if (!dattr)
+		return;
+	if (dattr->relax_domain_level < c->relax_domain_level)
+		dattr->relax_domain_level = c->relax_domain_level;
+	return;
+}
+
 /*
  * rebuild_sched_domains()
  *
@@ -553,19 +566,26 @@ static void rebuild_sched_domains(void)
 	int csn; /* how many cpuset ptrs in csa so far */
 	int i, j, k; /* indices for partition finding loops */
 	cpumask_t *doms; /* resulting partition; i.e. sched domains */
+	struct sched_domain_attr *dattr; /* attributes for custom domains */
 	int ndoms; /* number of sched domains in result */
 	int nslot; /* next empty doms[] cpumask_t slot */

 	q = NULL;
 	csa = NULL;
 	doms = NULL;
+	dattr = NULL;

 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
 		ndoms = 1;
 		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 		if (!doms)
 			goto rebuild;
+		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
+		if (dattr) {
+			*dattr = SD_ATTR_INIT;
+			update_domain_attr(dattr, &top_cpuset);
+		}
 		*doms = top_cpuset.cpus_allowed;
 		goto rebuild;
 	}
@@ -622,6 +642,7 @@ static void rebuild_sched_domains(void)
 	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms)
 		goto rebuild;
+	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

 	for (nslot = 0, i = 0; i < csn; i++) {
 		struct cpuset *a = csa[i];
@@ -644,12 +665,15 @@ static void rebuild_sched_domains(void)
 			}

 			cpus_clear(*dp);
+			if (dattr)
+				*(dattr + nslot) = SD_ATTR_INIT;
 			for (j = i; j < csn; j++) {
 				struct cpuset *b = csa[j];

 				if (apn == b->pn) {
 					cpus_or(*dp, *dp, b->cpus_allowed);
 					b->pn = -1;
+					update_domain_attr(dattr, b);
 				}
 			}
 			nslot++;
@@ -660,14 +684,15 @@ static void rebuild_sched_domains(void)
 rebuild:
 	/* Have scheduler rebuild sched domains */
 	get_online_cpus();
-	partition_sched_domains(ndoms, doms);
+	partition_sched_domains(ndoms, doms, dattr);
 	put_online_cpus();

 done:
 	if (q && !IS_ERR(q))
 		kfifo_free(q);
 	kfree(csa);
 	/* Don't kfree(doms) -- partition_sched_domains() does that. */
+	/* Don't kfree(dattr) -- partition_sched_domains() does that. */
 }

 static inline int started_after_time(struct task_struct *t1,
@@ -1011,6 +1036,21 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
 	return 0;
 }

+static int update_relax_domain_level(struct cpuset *cs, char *buf)
+{
+	int val = simple_strtol(buf, NULL, 10);
+
+	if (val < 0)
+		val = -1;
+
+	if (val != cs->relax_domain_level) {
+		cs->relax_domain_level = val;
+		rebuild_sched_domains();
+	}
+
+	return 0;
+}
+
 /*
  * update_flag - read a 0 or a 1 in a file and update associated flag
  * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
@@ -1202,6 +1242,7 @@ typedef enum {
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
 	FILE_SCHED_LOAD_BALANCE,
+	FILE_SCHED_RELAX_DOMAIN_LEVEL,
 	FILE_MEMORY_PRESSURE_ENABLED,
 	FILE_MEMORY_PRESSURE,
 	FILE_SPREAD_PAGE,
@@ -1256,6 +1297,9 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
 	case FILE_SCHED_LOAD_BALANCE:
 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
 		break;
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		retval = update_relax_domain_level(cs, buffer);
+		break;
 	case FILE_MEMORY_MIGRATE:
 		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
 		break;
@@ -1354,6 +1398,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
 	case FILE_SCHED_LOAD_BALANCE:
 		*s++ = is_sched_load_balance(cs) ? '1' : '0';
 		break;
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		s += sprintf(s, "%d", cs->relax_domain_level);
+		break;
 	case FILE_MEMORY_MIGRATE:
 		*s++ = is_memory_migrate(cs) ? '1' : '0';
 		break;
@@ -1424,6 +1471,13 @@ static struct cftype cft_sched_load_balance = {
 	.private = FILE_SCHED_LOAD_BALANCE,
 };

+static struct cftype cft_sched_relax_domain_level = {
+	.name = "sched_relax_domain_level",
+	.read = cpuset_common_file_read,
+	.write = cpuset_common_file_write,
+	.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+};
+
 static struct cftype cft_memory_migrate = {
 	.name = "memory_migrate",
 	.read = cpuset_common_file_read,
@@ -1475,6 +1529,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
 		return err;
+	if ((err = cgroup_add_file(cont, ss,
+				&cft_sched_relax_domain_level)) < 0)
+		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
 		return err;
 	if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
@@ -1559,6 +1616,7 @@ static struct cgroup_subsys_state *cpuset_create(
 	nodes_clear(cs->mems_allowed);
 	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
+	cs->relax_domain_level = -1;

 	cs->parent = parent;
 	number_of_cpusets++;
@@ -1631,6 +1689,7 @@ int __init cpuset_init(void)
 	fmeter_init(&top_cpuset.fmeter);
 	top_cpuset.mems_generation = cpuset_mems_generation++;
 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
+	top_cpuset.relax_domain_level = -1;

 	err = register_filesystem(&cpuset_fs_type);
 	if (err < 0)
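Not all seven changed files are expanded on this page, so the handler for the 'relax_domain_level=' boot option mentioned in the changelog is not visible above. Below is a plausible sketch of how such an option is usually wired up, reusing the default_relax_domain_level variable assumed in the earlier sketch; the exact parsing and bounds check are assumptions.

/*
 * Sketch only -- the boot-option handler is not among the hunks shown.
 * "relax_domain_level=N" would set the default applied whenever a cpuset
 * leaves its own level at -1 (the SD_ATTR_INIT value).
 */
static int __init setup_relax_domain_level(char *str)
{
	unsigned long val;

	val = simple_strtoul(str, NULL, 0);
	if (val < SD_LV_MAX)
		default_relax_domain_level = val;

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);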