---
r: 105242
b: refs/heads/master
c: 577b4a5
h: refs/heads/master
v: v3
David Howells authored and Ingo Molnar committed Jul 18, 2008
1 parent 54c2749 commit 115869f
Showing 10 changed files with 103 additions and 160 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 1b427c153a08fdbc092c2bdbf845b92fda58d857
refs/heads/master: 577b4a58d2e74a4d48050eeea3e3f952ce04eb86
6 changes: 1 addition & 5 deletions trunk/include/linux/cpumask.h
@@ -359,14 +359,13 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,

/*
* The following particular system cpumasks and operations manage
* possible, present, active and online cpus. Each of them is a fixed size
* possible, present and online cpus. Each of them is a fixed size
* bitmap of size NR_CPUS.
*
* #ifdef CONFIG_HOTPLUG_CPU
* cpu_possible_map - has bit 'cpu' set iff cpu is populatable
* cpu_present_map - has bit 'cpu' set iff cpu is populated
* cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_map - has bit 'cpu' set iff cpu available to migration
* #else
* cpu_possible_map - has bit 'cpu' set iff cpu is populated
* cpu_present_map - copy of cpu_possible_map
@@ -417,7 +416,6 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;
extern cpumask_t cpu_active_map;

#if NR_CPUS > 1
#define num_online_cpus() cpus_weight(cpu_online_map)
@@ -426,15 +424,13 @@ extern cpumask_t cpu_active_map;
#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map)
#else
#define num_online_cpus() 1
#define num_possible_cpus() 1
#define num_present_cpus() 1
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
#define cpu_active(cpu) ((cpu) == 0)
#endif

#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
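For reference, the predicates this hunk removes and keeps are thin wrappers over the fixed-size bitmaps described in the comment above (cpu_possible_map, cpu_present_map, cpu_online_map). A minimal sketch of how code of this era queries them (illustrative only, not part of this commit; the helper name is made up) might be:

```c
#include <linux/cpumask.h>

/* Hypothetical helper: count CPUs that are populated but not yet online. */
static unsigned int count_present_offline_cpus(void)
{
	unsigned int cpu, n = 0;

	for_each_present_cpu(cpu) {	/* walks bits set in cpu_present_map */
		if (!cpu_online(cpu))	/* bit clear in cpu_online_map */
			n++;
	}

	return n;
}
```

On !SMP builds the same code still compiles, because the NR_CPUS == 1 branch above collapses cpu_online()/cpu_present() to '(cpu) == 0'.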
7 changes: 0 additions & 7 deletions trunk/include/linux/cpuset.h
@@ -78,8 +78,6 @@ extern void cpuset_track_online_nodes(void);

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init_early(void) { return 0; }
@@ -158,11 +156,6 @@ static inline int current_cpuset_is_being_rebound(void)
return 0;
}

static inline void rebuild_sched_domains(void)
{
partition_sched_domains(0, NULL, NULL);
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */
11 changes: 1 addition & 10 deletions trunk/include/linux/sched.h
@@ -824,16 +824,7 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
struct sched_domain_attr *dattr_new);
extern int arch_reinit_sched_domains(void);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
struct sched_domain_attr *dattr_new)
{
}
#endif /* !CONFIG_SMP */
#endif /* CONFIG_SMP */

struct io_context; /* See blkdev.h */
#define NGROUPS_SMALL 32
7 changes: 0 additions & 7 deletions trunk/init/main.c
@@ -415,13 +415,6 @@ static void __init smp_init(void)
{
unsigned int cpu;

/*
* Set up the current CPU as possible to migrate to.
* The other ones will be done by cpu_up/cpu_down()
*/
cpu = smp_processor_id();
cpu_set(cpu, cpu_active_map);

/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
40 changes: 6 additions & 34 deletions trunk/kernel/cpu.c
@@ -64,8 +64,6 @@ void __init cpu_hotplug_init(void)
cpu_hotplug.refcount = 0;
}

cpumask_t cpu_active_map;

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
@@ -293,30 +291,11 @@ int __ref cpu_down(unsigned int cpu)
int err = 0;

cpu_maps_update_begin();

if (cpu_hotplug_disabled) {
if (cpu_hotplug_disabled)
err = -EBUSY;
goto out;
}

cpu_clear(cpu, cpu_active_map);

/*
* Make sure that all cpus did the reschedule and are not
* using a stale version of the cpu_active_map.
* This is not strictly necessary because stop_machine()
* that we run down the line already provides the required
* synchronization. But it's really a side effect and we do not
* want to depend on the innards of the stop_machine here.
*/
synchronize_sched();

err = _cpu_down(cpu, 0);
else
err = _cpu_down(cpu, 0);

if (cpu_online(cpu))
cpu_set(cpu, cpu_active_map);

out:
cpu_maps_update_done();
return err;
}
@@ -376,18 +355,11 @@ int __cpuinit cpu_up(unsigned int cpu)
}

cpu_maps_update_begin();

if (cpu_hotplug_disabled) {
if (cpu_hotplug_disabled)
err = -EBUSY;
goto out;
}

err = _cpu_up(cpu, 0);
else
err = _cpu_up(cpu, 0);

if (cpu_online(cpu))
cpu_set(cpu, cpu_active_map);

out:
cpu_maps_update_done();
return err;
}
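Both cpu_down() and cpu_up() above serialize against readers via cpu_maps_update_begin()/cpu_maps_update_done() and the get_online_cpus() refcount declared earlier in this file. A rough sketch of the reader side (illustrative only, not part of this commit; the helper and callback are hypothetical):

```c
#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hypothetical reader: visit every online CPU while holding off hotplug. */
static void for_each_online_cpu_stable(void (*fn)(unsigned int cpu))
{
	unsigned int cpu;

	get_online_cpus();	/* pins the current set of online CPUs */
	for_each_online_cpu(cpu)
		fn(cpu);
	put_online_cpus();	/* lets a pending cpu_up()/cpu_down() proceed */
}
```

The restored arch_reinit_sched_domains() in kernel/sched.c below follows this same pattern around its domain rebuild.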
2 changes: 1 addition & 1 deletion trunk/kernel/cpuset.c
@@ -564,7 +564,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
* partition_sched_domains().
*/

void rebuild_sched_domains(void)
static void rebuild_sched_domains(void)
{
struct kfifo *q; /* queue of cpusets to be scanned */
struct cpuset *cp; /* scans q */
108 changes: 63 additions & 45 deletions trunk/kernel/sched.c
@@ -2881,7 +2881,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)

rq = task_rq_lock(p, &flags);
if (!cpu_isset(dest_cpu, p->cpus_allowed)
|| unlikely(!cpu_active(dest_cpu)))
|| unlikely(cpu_is_offline(dest_cpu)))
goto out;

/* force the process onto the specified CPU */
@@ -3849,7 +3849,7 @@ int select_nohz_load_balancer(int stop_tick)
/*
* If we are going offline and still the leader, give up!
*/
if (!cpu_active(cpu) &&
if (cpu_is_offline(cpu) &&
atomic_read(&nohz.load_balancer) == cpu) {
if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
BUG();
@@ -5876,7 +5876,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
struct rq *rq_dest, *rq_src;
int ret = 0, on_rq;

if (unlikely(!cpu_active(dest_cpu)))
if (unlikely(cpu_is_offline(dest_cpu)))
return ret;

rq_src = cpu_rq(src_cpu);
@@ -7553,6 +7553,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
{
}

/*
* Free current domain masks.
* Called after all cpus are attached to NULL domain.
*/
static void free_sched_domains(void)
{
ndoms_cur = 0;
if (doms_cur != &fallback_doms)
kfree(doms_cur);
doms_cur = &fallback_doms;
}

/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
@@ -7631,7 +7643,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
* ownership of it and will kfree it when done with it. If the caller
* failed the kmalloc call, then it can pass in doms_new == NULL,
* and partition_sched_domains() will fallback to the single partition
* 'fallback_doms', it also forces the domains to be rebuilt.
* 'fallback_doms'.
*
* Call with hotplug lock held
*/
@@ -7645,8 +7657,12 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
/* always unregister in case we don't destroy any domains */
unregister_sched_domain_sysctl();

if (doms_new == NULL)
ndoms_new = 0;
if (doms_new == NULL) {
ndoms_new = 1;
doms_new = &fallback_doms;
cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
dattr_new = NULL;
}

/* Destroy deleted domains */
for (i = 0; i < ndoms_cur; i++) {
@@ -7661,14 +7677,6 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
;
}

if (doms_new == NULL) {
ndoms_cur = 0;
ndoms_new = 1;
doms_new = &fallback_doms;
cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
dattr_new = NULL;
}

/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < ndoms_cur; j++) {
@@ -7699,10 +7707,17 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
int arch_reinit_sched_domains(void)
{
int err;

get_online_cpus();
rebuild_sched_domains();
mutex_lock(&sched_domains_mutex);
detach_destroy_domains(&cpu_online_map);
free_sched_domains();
err = arch_init_sched_domains(&cpu_online_map);
mutex_unlock(&sched_domains_mutex);
put_online_cpus();
return 0;

return err;
}

static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
@@ -7768,49 +7783,59 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */

#ifndef CONFIG_CPUSETS
/*
* Add online and remove offline CPUs from the scheduler domains.
* When cpusets are enabled they take over this function.
* Force a reinitialization of the sched domains hierarchy. The domains
* and groups cannot be updated in place without racing with the balancing
* code, so we temporarily attach all running cpus to the NULL domain
* which will prevent rebalancing while the sched domains are recalculated.
*/
static int update_sched_domains(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
partition_sched_domains(0, NULL, NULL);
return NOTIFY_OK;

default:
return NOTIFY_DONE;
}
}
#endif

static int update_runtime(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
int cpu = (int)(long)hcpu;

switch (action) {
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
disable_runtime(cpu_rq(cpu));
/* fall-through */
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
detach_destroy_domains(&cpu_online_map);
free_sched_domains();
return NOTIFY_OK;


case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
enable_runtime(cpu_rq(cpu));
return NOTIFY_OK;

/* fall-through */
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
/*
* Fall through and re-initialise the domains.
*/
break;
default:
return NOTIFY_DONE;
}

#ifndef CONFIG_CPUSETS
/*
* Create default domain partitioning if cpusets are disabled.
* Otherwise we let cpusets rebuild the domains based on the
* current setup.
*/

/* The hotplug lock is already held by cpu_up/cpu_down */
arch_init_sched_domains(&cpu_online_map);
#endif

return NOTIFY_OK;
}

void __init sched_init_smp(void)
@@ -7830,15 +7855,8 @@ void __init sched_init_smp(void)
cpu_set(smp_processor_id(), non_isolated_cpus);
mutex_unlock(&sched_domains_mutex);
put_online_cpus();

#ifndef CONFIG_CPUSETS
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
#endif

/* RT runtime code needs to handle some hotplug events */
hotcpu_notifier(update_runtime, 0);

init_hrtick();

/* Move init over to a non-isolated CPU */
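The comment restored above spells out the partition_sched_domains() contract: the caller hands over a kmalloc'ed array of cpumask_t partitions, whose ownership moves to the scheduler, or passes NULL to fall back to the single 'fallback_doms' partition. A minimal, hypothetical caller honouring that contract (illustrative only; the real in-tree caller is rebuild_sched_domains() in kernel/cpuset.c) could look like this:

```c
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical: hand the scheduler one partition spanning all online CPUs. */
static void example_repartition(void)
{
	cpumask_t *doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);

	get_online_cpus();		/* "Call with hotplug lock held" */
	if (doms) {
		doms[0] = cpu_online_map;
		/* ownership of 'doms' passes to the scheduler, which kfree()s it */
		partition_sched_domains(1, doms, NULL);
	} else {
		/* allocation failed: NULL selects the fallback_doms partition */
		partition_sched_domains(0, NULL, NULL);
	}
	put_online_cpus();
}
```

Unlike the fallback path in the -7645 hunk, this sketch does not subtract cpu_isolated_map, since that mask is local to kernel/sched.c.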
3 changes: 0 additions & 3 deletions trunk/kernel/sched_fair.c
@@ -1004,8 +1004,6 @@ static void yield_task_fair(struct rq *rq)
* not idle and an idle cpu is available. The span of cpus to
* search starts with cpus closest then further out as needed,
* so we always favor a closer, idle cpu.
* Domains may include CPUs that are not usable for migration,
* hence we need to mask them out (cpu_active_map)
*
* Returns the CPU we should wake onto.
*/
@@ -1033,7 +1031,6 @@ static int wake_idle(int cpu, struct task_struct *p)
|| ((sd->flags & SD_WAKE_IDLE_FAR)
&& !task_hot(p, task_rq(p)->clock, sd))) {
cpus_and(tmp, sd->span, p->cpus_allowed);
cpus_and(tmp, tmp, cpu_active_map);
for_each_cpu_mask(i, tmp) {
if (idle_cpu(i)) {
if (i != task_cpu(p)) {
[diff for 1 remaining changed file not shown]