sched: Move sched domain storage into the topology list
In order to remove the last dependency on the static domain levels,
move the sd_data storage into the topology structure.
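
In short, per-level storage moves out of the fixed-size array in struct s_data

	struct s_data {
		struct sched_domain ** __percpu sd;
		struct sd_data sdd[SD_LV_MAX];	/* removed */
		struct root_domain *rd;
	};

and into the topology entry itself

	struct sched_domain_topology_level {
		sched_domain_init_f init;
		sched_domain_mask_f mask;
		struct sd_data      data;	/* added */
	};

so each level's init function reaches its sd_data through its own entry
(tl->data) rather than by indexing a static array with SD_LV_*.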

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.924926412@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra authored and Ingo Molnar committed Apr 11, 2011
1 parent d069b91 commit 54ab4ff
 kernel/sched.c | 129 ++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 77 insertions(+), 52 deletions(-)

--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6837,7 +6837,6 @@ struct sd_data {
 
 struct s_data {
 	struct sched_domain ** __percpu sd;
-	struct sd_data sdd[SD_LV_MAX];
 	struct root_domain *rd;
 };
 
@@ -6848,12 +6847,15 @@ enum s_alloc {
 	sa_none,
 };
 
-typedef struct sched_domain *(*sched_domain_init_f)(struct s_data *d, int cpu);
+struct sched_domain_topology_level;
+
+typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	struct sd_data      data;
 };
 
 /*
@@ -6958,15 +6960,16 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 # define SD_INIT_NAME(sd, type)		do { } while (0)
 #endif
 
-#define SD_INIT_FUNC(type)						\
-static noinline struct sched_domain *sd_init_##type(struct s_data *d, int cpu) \
-{									\
-	struct sched_domain *sd = *per_cpu_ptr(d->sdd[SD_LV_##type].sd, cpu); \
-	*sd = SD_##type##_INIT;						\
-	sd->level = SD_LV_##type;					\
-	SD_INIT_NAME(sd, type);						\
-	sd->private = &d->sdd[SD_LV_##type];				\
-	return sd;							\
+#define SD_INIT_FUNC(type)						\
+static noinline struct sched_domain *					\
+sd_init_##type(struct sched_domain_topology_level *tl, int cpu)	\
+{									\
+	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
+	*sd = SD_##type##_INIT;						\
+	sd->level = SD_LV_##type;					\
+	SD_INIT_NAME(sd, type);						\
+	sd->private = &tl->data;					\
+	return sd;							\
 }
 
 SD_INIT_FUNC(CPU)
@@ -7019,26 +7022,20 @@ static void set_domain_attribute(struct sched_domain *sd,
 	}
 }
 
+static void __sdt_free(const struct cpumask *cpu_map);
+static int __sdt_alloc(const struct cpumask *cpu_map);
+
 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 				 const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	switch (what) {
 	case sa_rootdomain:
 		if (!atomic_read(&d->rd->refcount))
 			free_rootdomain(&d->rd->rcu); /* fall through */
 	case sa_sd:
 		free_percpu(d->sd); /* fall through */
 	case sa_sd_storage:
-		for (i = 0; i < SD_LV_MAX; i++) {
-			for_each_cpu(j, cpu_map) {
-				kfree(*per_cpu_ptr(d->sdd[i].sd, j));
-				kfree(*per_cpu_ptr(d->sdd[i].sg, j));
-			}
-			free_percpu(d->sdd[i].sd);
-			free_percpu(d->sdd[i].sg);
-		} /* fall through */
+		__sdt_free(cpu_map); /* fall through */
 	case sa_none:
 		break;
 	}
@@ -7047,38 +7044,10 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 						   const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	memset(d, 0, sizeof(*d));
 
-	for (i = 0; i < SD_LV_MAX; i++) {
-		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
-		if (!d->sdd[i].sd)
-			return sa_sd_storage;
-
-		d->sdd[i].sg = alloc_percpu(struct sched_group *);
-		if (!d->sdd[i].sg)
-			return sa_sd_storage;
-
-		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd;
-			struct sched_group *sg;
-
-			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sd)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sd, j) = sd;
-
-			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sg)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sg, j) = sg;
-		}
-	}
+	if (__sdt_alloc(cpu_map))
+		return sa_sd_storage;
 	d->sd = alloc_percpu(struct sched_domain *);
 	if (!d->sd)
 		return sa_sd_storage;
@@ -7137,12 +7106,68 @@ static struct sched_domain_topology_level default_topology[] = {
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+static int __sdt_alloc(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		sdd->sd = alloc_percpu(struct sched_domain *);
+		if (!sdd->sd)
+			return -ENOMEM;
+
+		sdd->sg = alloc_percpu(struct sched_group *);
+		if (!sdd->sg)
+			return -ENOMEM;
+
+		for_each_cpu(j, cpu_map) {
+			struct sched_domain *sd;
+			struct sched_group *sg;
+
+			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sd)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sd, j) = sd;
+
+			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sg)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sg, j) = sg;
+		}
+	}
+
+	return 0;
+}
+
+static void __sdt_free(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		for_each_cpu(j, cpu_map) {
+			kfree(*per_cpu_ptr(sdd->sd, j));
+			kfree(*per_cpu_ptr(sdd->sg, j));
+		}
+		free_percpu(sdd->sd);
+		free_percpu(sdd->sg);
+	}
+}
+
 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		struct s_data *d, const struct cpumask *cpu_map,
 		struct sched_domain_attr *attr, struct sched_domain *child,
 		int cpu)
 {
-	struct sched_domain *sd = tl->init(d, cpu);
+	struct sched_domain *sd = tl->init(tl, cpu);
 	if (!sd)
 		return child;
 
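
For context, a minimal userspace sketch of the pattern this patch converges on:
a sentinel-terminated level table whose entries each carry their own storage
and are handed to their own init hook, the way sched_domain_topology is walked
by __sdt_alloc()/__sdt_free() and called via tl->init(tl, cpu) above. The names
here (struct level, demo_init, topology) are illustrative stand-ins, not kernel
code.

	#include <stdio.h>
	#include <stdlib.h>

	struct level;			/* forward declaration, as in the patch */
	typedef void *(*level_init_f)(struct level *l, int cpu);

	struct level {
		level_init_f init;	/* like sched_domain_init_f */
		void *data;		/* per-level storage, like sd_data */
	};

	/* The hook reaches its storage through its own entry, like tl->data. */
	static void *demo_init(struct level *l, int cpu)
	{
		printf("cpu %d: level %p, data %p\n", cpu, (void *)l, l->data);
		return l->data;
	}

	/* Sentinel-terminated, like default_topology: NULL init ends the walk. */
	static struct level topology[] = {
		{ demo_init, NULL },
		{ demo_init, NULL },
		{ NULL, NULL },
	};

	int main(void)
	{
		struct level *l;
		int cpu;

		/* Allocate per-level storage, mirroring __sdt_alloc(). */
		for (l = topology; l->init; l++) {
			l->data = malloc(64);
			if (!l->data)
				return 1;	/* __sdt_alloc() returns -ENOMEM here */
		}

		/* Build: each hook gets its own level, like tl->init(tl, cpu). */
		for (cpu = 0; cpu < 2; cpu++)
			for (l = topology; l->init; l++)
				l->init(l, cpu);

		/* Tear down, mirroring __sdt_free(). */
		for (l = topology; l->init; l++)
			free(l->data);

		return 0;
	}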
