Commit ccaae26

---
r: 191399
b: refs/heads/master
c: 897f0b3
h: refs/heads/master
i:
  191397: f0b226e
  191395: 8ea2ebe
  191391: be89796
v: v3
Oleg Nesterov authored and Ingo Molnar committed Apr 2, 2010
1 parent 7605663 commit ccaae26
Showing 4 changed files with 5 additions and 47 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 25c2d55c00c6097e6792ebf21e31342f23b9b768
+refs/heads/master: 897f0b3c3ff40b443c84e271bef19bd6ae885195
13 changes: 0 additions & 13 deletions trunk/include/linux/cpuset.h
@@ -21,8 +21,6 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +67,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,11 +100,6 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
-{
-	cpumask_copy(mask, cpu_possible_mask);
-}
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 {
@@ -157,9 +147,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
27 changes: 1 addition & 26 deletions trunk/kernel/cpuset.c
@@ -2182,19 +2182,10 @@ void __init cpuset_init_smp(void)
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	mutex_lock(&callback_mutex);
-	cpuset_cpus_allowed_locked(tsk, pmask);
-	mutex_unlock(&callback_mutex);
-}
-
-/**
- * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
- **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
-{
 	task_lock(tsk);
 	guarantee_online_cpus(task_cs(tsk), pmask);
 	task_unlock(tsk);
+	mutex_unlock(&callback_mutex);
 }
 
 void cpuset_init_current_mems_allowed(void)
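
For readability, here is the merged cpuset_cpus_allowed() as it reads once the hunk above is applied — a reconstruction from the hunk, not text carried in the commit itself:

```c
/*
 * Post-patch cpuset_cpus_allowed(), reconstructed from the hunk above:
 * the _locked variant is folded in, so callback_mutex is acquired and
 * released entirely inside this function.
 */
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_cpus(task_cs(tsk), pmask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);
}
```

Callers therefore no longer need any external cpuset locking around this helper.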
@@ -2382,22 +2373,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 	return 0;
 }
 
-/**
- * cpuset_lock - lock out any changes to cpuset structures
- *
- * The out of memory (oom) code needs to mutex_lock cpusets
- * from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset. Expose callback_mutex via this
- * cpuset_lock() routine, so the oom code can lock it, before
- * locking the task list. The tasklist_lock is a spinlock, so
- * must be taken inside callback_mutex.
- */
-
-void cpuset_lock(void)
-{
-	mutex_lock(&callback_mutex);
-}
-
 /**
  * cpuset_unlock - release lock on cpuset changes
  *
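
The kernel-doc deleted above records the old nesting rule: callback_mutex, a sleeping mutex, had to be taken before the tasklist_lock spinlock. A minimal sketch of that now-removed protocol in the OOM path — oom_scan_tasks() is a hypothetical stand-in for the tasklist scan, not a kernel function:

```c
/*
 * Pre-patch OOM-path locking, per the removed comment above.
 * oom_scan_tasks() is a hypothetical illustration only.
 */
static void oom_scan_tasks(void)
{
	cpuset_lock();			/* callback_mutex: take the mutex first */
	read_lock(&tasklist_lock);	/* spinlock: must nest inside the mutex */
	/* ... look for a task in an overlapping cpuset ... */
	read_unlock(&tasklist_lock);
	cpuset_unlock();
}
```

This is exactly the bracketing the patch removes, since cpuset_cpus_allowed() now handles callback_mutex internally.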
10 changes: 3 additions & 7 deletions trunk/kernel/sched.c
@@ -2296,11 +2296,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		return dest_cpu;
 
 	/* No more Mr. Nice Guy. */
-	if (dest_cpu >= nr_cpu_ids) {
-		rcu_read_lock();
-		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		rcu_read_unlock();
-		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+	if (unlikely(dest_cpu >= nr_cpu_ids)) {
+		cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+		dest_cpu = cpumask_any(cpu_active_mask);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
@@ -5866,7 +5864,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
@@ -5879,7 +5876,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq->idle->sched_class = &idle_sched_class;
 		migrate_dead_tasks(cpu);
 		raw_spin_unlock_irq(&rq->lock);
-		cpuset_unlock();
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 		calc_global_load_remove(rq);
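
With both cpuset_lock()/cpuset_unlock() calls dropped, the CPU_DEAD teardown in migration_call() reads as below — reconstructed from the two hunks above; lines the diff does not show are kept elided:

```c
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No cpuset_lock() bracketing any more. */
		migrate_live_tasks(cpu);
		rq = cpu_rq(cpu);
		kthread_stop(rq->migration_thread);
		/* ... lines not shown by the diff ... */
		rq->idle->sched_class = &idle_sched_class;
		migrate_dead_tasks(cpu);
		raw_spin_unlock_irq(&rq->lock);
		migrate_nr_uninterruptible(rq);
		BUG_ON(rq->nr_running != 0);
		calc_global_load_remove(rq);
```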
