Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  kernel/sched.c: add missing forward declaration for 'double_rq_lock'
  sched: partly revert "sched debug: remove NULL checking in print_cfs_rt_rq()"
  cpumask: fix CONFIG_NUMA=y sched.c
Linus Torvalds committed Jan 13, 2009
2 parents (1181a24 + fd2ab30), commit 1284709
Showing 2 changed files with 25 additions and 9 deletions.
kernel/sched.c: 13 changes (8 additions, 5 deletions)
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch);
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
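
For context on the first fix: double_rq_lock() is defined much later in kernel/sched.c, but code above that point (the !CONFIG_PREEMPT variant of _double_lock_balance(), if memory serves) calls it, so without a forward declaration the build fails with an implicit-declaration error. A minimal stand-alone sketch of the pattern, with illustrative names rather than the kernel's:

#include <stdio.h>

/* forward declaration: callers earlier in the file now compile */
static void double_lock(int *a, int *b);

static void caller(int *x, int *y)
{
        double_lock(x, y);              /* resolved via the declaration */
}

/* the definition itself appears later in the translation unit */
static void double_lock(int *a, int *b)
{
        printf("locking %p then %p\n", (void *)a, (void *)b);
}

int main(void)
{
        int a = 0, b = 0;

        caller(&a, &b);
        return 0;
}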
@@ -7282,10 +7285,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
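
Why the type change in the second fix matters: node_domains and allnodes_domains were still declared as bare struct sched_domain, while the other per-cpu domain levels already use the static_sched_domain wrapper, which pairs the domain with statically sized storage for its CPU span so no runtime cpumask allocation is needed. A self-contained model of the wrapper's shape; the field layout is recalled from 2.6.29-era sched.c and should be treated as an assumption:

#include <limits.h>

/* stand-ins so the model compiles outside the kernel tree */
#define CONFIG_NR_CPUS 64
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define DECLARE_BITMAP(name, bits) \
        unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

struct sched_domain { int level; };     /* placeholder for the real struct */

/* the wrapper: a domain plus static span storage (layout assumed) */
struct static_sched_domain {
        struct sched_domain sd;
        DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};

int main(void)
{
        struct static_sched_domain d = { { 0 }, { 0 } };

        return (int)d.span[0];          /* 0: the model merely type-checks */
}

This wrapper is also why every per_cpu(node_domains, i) lookup below gains a trailing .sd: the lookup now yields the wrapper, and .sd recovers the struct sched_domain the surrounding code expects.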
@@ -7560,7 +7563,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 	if (cpumask_weight(cpu_map) >
 			SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-		sd = &per_cpu(allnodes_domains, i);
+		sd = &per_cpu(allnodes_domains, i).sd;
 		SD_INIT(sd, ALLNODES);
 		set_domain_attribute(sd, attr);
 		cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7573,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	} else
 		p = NULL;
 
-	sd = &per_cpu(node_domains, i);
+	sd = &per_cpu(node_domains, i).sd;
 	SD_INIT(sd, NODE);
 	set_domain_attribute(sd, attr);
 	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7691,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
kernel/sched_debug.c: 21 changes (17 additions, 4 deletions)
@@ -145,6 +145,19 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+	/* may be NULL if the underlying cgroup isn't fully-created yet */
+	if (!tg->css.cgroup) {
+		buf[0] = '\0';
+		return;
+	}
+	cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
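
This hunk is the "partly revert" named in the merge: the debug printers can run while a task group's cgroup is still being set up, in which case tg->css.cgroup is NULL and the old unconditional cgroup_path() call would dereference it. The new helper centralizes the NULL check and yields an empty path instead; and because it always writes to buf, the callers in the hunks below can also drop their char path[128] = "" initializers. A toy user-space model of the guard, with stand-in types that are purely illustrative:

#include <stdio.h>

/* toy stand-ins for the kernel types; illustrative only */
struct cgroup { const char *name; };
struct cgroup_subsys_state { struct cgroup *cgroup; };
struct task_group { struct cgroup_subsys_state css; };

/* mirrors the helper: empty path while the cgroup isn't there yet */
static void task_group_path(struct task_group *tg, char *buf, int buflen)
{
        if (!tg->css.cgroup) {
                buf[0] = '\0';
                return;
        }
        snprintf(buf, buflen, "/%s", tg->css.cgroup->name);
}

int main(void)
{
        char path[128];
        struct task_group half_made = { { NULL } };

        task_group_path(&half_made, path, sizeof(path));
        printf("path='%s'\n", path);    /* prints path='' rather than crashing */
        return 0;
}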
@@ -154,10 +167,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	unsigned long flags;
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = cfs_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
@@ -208,10 +221,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = rt_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
