diff --git a/[refs] b/[refs]
index fe6e0628be62..402aa3b50d92 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fffaee365fded09f9ebf2db19066065fa54323c3
+refs/heads/master: d4e30ef05c9e0fad9782de34f0acd039e238fd43
diff --git a/trunk/arch/blackfin/kernel/process.c b/trunk/arch/blackfin/kernel/process.c
index 62bcea7dcc6d..2e3994b20169 100644
--- a/trunk/arch/blackfin/kernel/process.c
+++ b/trunk/arch/blackfin/kernel/process.c
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
 	unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-	if (current->nr_cpus_allowed == num_possible_cpus())
+	if (current->rt.nr_cpus_allowed == num_possible_cpus())
 		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c
index fd019d78b1f4..f56f96da77f5 100644
--- a/trunk/arch/x86/kernel/smpboot.c
+++ b/trunk/arch/x86/kernel/smpboot.c
@@ -410,7 +410,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 /* maps the cpu to the sched domain representing multi-core */
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return cpu_llc_shared_mask(cpu);
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	/*
+	 * For perf, we return last level cache shared map.
+	 * And for power savings, we return cpu_core_map
+	 */
+	if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
+		return cpu_core_mask(cpu);
+	else
+		return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c
index 45cfcea63507..f30dc95f83b1 100644
--- a/trunk/drivers/gpu/drm/radeon/r600.c
+++ b/trunk/drivers/gpu/drm/radeon/r600.c
@@ -2426,6 +2426,12 @@ int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
 	return 0;
 }
 
@@ -2462,12 +2468,6 @@ int r600_resume(struct radeon_device *rdev)
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		DRM_ERROR("radeon: audio resume failed\n");
-		return r;
-	}
-
 	return r;
 }
 
@@ -2577,9 +2577,6 @@ int r600_init(struct radeon_device *rdev)
 		rdev->accel_working = false;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r)
-		return r; /* TODO error handling */
 	return 0;
 }
 
diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c
index 25f9eef12c42..e95c5e61d4e2 100644
--- a/trunk/drivers/gpu/drm/radeon/rs600.c
+++ b/trunk/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		dev_err(rdev->dev, "failed initializing audio\n");
-		return r;
-	}
-
 	r = radeon_ib_pool_start(rdev);
 	if (r)
 		return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
 	return 0;
 }
 
diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c
index 3277ddecfe9f..159b6a43fda0 100644
--- a/trunk/drivers/gpu/drm/radeon/rs690.c
+++ b/trunk/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
 		return r;
 	}
 
-	r = r600_audio_init(rdev);
-	if (r) {
-		dev_err(rdev->dev, "failed initializing audio\n");
-		return r;
-	}
-
 	r = radeon_ib_pool_start(rdev);
 	if (r)
 		return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = r600_audio_init(rdev);
+	if (r) {
initializing audio\n"); + return r; + } + return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 04ddc365a908..4ad0281fdc37 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -956,6 +956,12 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; + r = r600_audio_init(rdev); + if (r) { + DRM_ERROR("radeon: audio init failed\n"); + return r; + } + return 0; } @@ -978,12 +984,6 @@ int rv770_resume(struct radeon_device *rdev) return r; } - r = r600_audio_init(rdev); - if (r) { - dev_err(rdev->dev, "radeon: audio init failed\n"); - return r; - } - return r; } @@ -1092,12 +1092,6 @@ int rv770_init(struct radeon_device *rdev) rdev->accel_working = false; } - r = r600_audio_init(rdev); - if (r) { - dev_err(rdev->dev, "radeon: audio init failed\n"); - return r; - } - return 0; } diff --git a/trunk/fs/fuse/control.c b/trunk/fs/fuse/control.c index 03ff5b1eba93..42593c587d48 100644 --- a/trunk/fs/fuse/control.c +++ b/trunk/fs/fuse/control.c @@ -75,13 +75,19 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf, unsigned global_limit) { unsigned long t; + char tmp[32]; unsigned limit = (1 << 16) - 1; int err; - if (*ppos) + if (*ppos || count >= sizeof(tmp) - 1) + return -EINVAL; + + if (copy_from_user(tmp, buf, count)) return -EINVAL; - err = kstrtoul_from_user(buf, count, 0, &t); + tmp[count] = '\0'; + + err = strict_strtoul(tmp, 0, &t); if (err) return err; diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c index 334e0b18a014..df5ac048dc74 100644 --- a/trunk/fs/fuse/dir.c +++ b/trunk/fs/fuse/dir.c @@ -775,8 +775,6 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, struct kstat *stat) { - unsigned int blkbits; - stat->dev = inode->i_sb->s_dev; stat->ino = attr->ino; stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); @@ -792,13 +790,7 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, stat->ctime.tv_nsec = attr->ctimensec; stat->size = attr->size; stat->blocks = attr->blocks; - - if (attr->blksize != 0) - blkbits = ilog2(attr->blksize); - else - blkbits = inode->i_sb->s_blocksize_bits; - - stat->blksize = 1 << blkbits; + stat->blksize = (1 << inode->i_blkbits); } static int fuse_do_getattr(struct inode *inode, struct kstat *stat, @@ -871,7 +863,6 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, if (stat) { generic_fillattr(inode, stat); stat->mode = fi->orig_i_mode; - stat->ino = fi->orig_ino; } } diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c index b321a688cde7..9562109d3a87 100644 --- a/trunk/fs/fuse/file.c +++ b/trunk/fs/fuse/file.c @@ -2173,44 +2173,6 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, return ret; } -long fuse_file_fallocate(struct file *file, int mode, loff_t offset, - loff_t length) -{ - struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; - struct fuse_req *req; - struct fuse_fallocate_in inarg = { - .fh = ff->fh, - .offset = offset, - .length = length, - .mode = mode - }; - int err; - - if (fc->no_fallocate) - return -EOPNOTSUPP; - - req = fuse_get_req(fc); - if (IS_ERR(req)) - return PTR_ERR(req); - - req->in.h.opcode = FUSE_FALLOCATE; - req->in.h.nodeid = ff->nodeid; - req->in.numargs = 1; - req->in.args[0].size = sizeof(inarg); - req->in.args[0].value = &inarg; - fuse_request_send(fc, req); - err = req->out.h.error; - if (err == -ENOSYS) 
-	if (err == -ENOSYS) {
-		fc->no_fallocate = 1;
-		err = -EOPNOTSUPP;
-	}
-	fuse_put_request(fc, req);
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(fuse_file_fallocate);
-
 static const struct file_operations fuse_file_operations = {
 	.llseek		= fuse_file_llseek,
 	.read		= do_sync_read,
@@ -2228,7 +2190,6 @@ static const struct file_operations fuse_file_operations = {
 	.unlocked_ioctl	= fuse_file_ioctl,
 	.compat_ioctl	= fuse_file_compat_ioctl,
 	.poll		= fuse_file_poll,
-	.fallocate	= fuse_file_fallocate,
 };
 
 static const struct file_operations fuse_direct_io_file_operations = {
@@ -2245,7 +2206,6 @@ static const struct file_operations fuse_direct_io_file_operations = {
 	.unlocked_ioctl	= fuse_file_ioctl,
 	.compat_ioctl	= fuse_file_compat_ioctl,
 	.poll		= fuse_file_poll,
-	.fallocate	= fuse_file_fallocate,
 	/* no splice_read */
 };
 
diff --git a/trunk/fs/fuse/fuse_i.h b/trunk/fs/fuse/fuse_i.h
index 771fb6322c07..572cefc78012 100644
--- a/trunk/fs/fuse/fuse_i.h
+++ b/trunk/fs/fuse/fuse_i.h
@@ -82,9 +82,6 @@ struct fuse_inode {
 	    preserve the original mode */
 	umode_t orig_i_mode;
 
-	/** 64 bit inode number */
-	u64 orig_ino;
-
 	/** Version of last attribute change */
 	u64 attr_version;
 
@@ -481,9 +478,6 @@ struct fuse_conn {
 	/** Are BSD file locking primitives not implemented by fs? */
 	unsigned no_flock:1;
 
-	/** Is fallocate not implemented by fs? */
-	unsigned no_fallocate:1;
-
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c
index 1cd61652018c..42678a33b7bb 100644
--- a/trunk/fs/fuse/inode.c
+++ b/trunk/fs/fuse/inode.c
@@ -91,7 +91,6 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
 	fi->nlookup = 0;
 	fi->attr_version = 0;
 	fi->writectr = 0;
-	fi->orig_ino = 0;
 	INIT_LIST_HEAD(&fi->write_files);
 	INIT_LIST_HEAD(&fi->queued_writes);
 	INIT_LIST_HEAD(&fi->writepages);
@@ -140,18 +139,6 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
 	return 0;
 }
 
-/*
- * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
- * so that it will fit.
- */
-static ino_t fuse_squash_ino(u64 ino64)
-{
-	ino_t ino = (ino_t) ino64;
-	if (sizeof(ino_t) < sizeof(u64))
-		ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
-	return ino;
-}
-
 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
 				   u64 attr_valid)
 {
@@ -161,7 +148,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
 	fi->attr_version = ++fc->attr_version;
 	fi->i_time = attr_valid;
 
-	inode->i_ino     = fuse_squash_ino(attr->ino);
+	inode->i_ino     = attr->ino;
 	inode->i_mode    = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
 	set_nlink(inode, attr->nlink);
 	inode->i_uid     = attr->uid;
@@ -187,8 +174,6 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
 	fi->orig_i_mode = inode->i_mode;
 	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
 		inode->i_mode &= ~S_ISVTX;
-
-	fi->orig_ino = attr->ino;
 }
 
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
diff --git a/trunk/include/linux/fuse.h b/trunk/include/linux/fuse.h
index 9303348965fb..8f2ab8fef929 100644
--- a/trunk/include/linux/fuse.h
+++ b/trunk/include/linux/fuse.h
@@ -54,9 +54,6 @@
  * 7.18
  *  - add FUSE_IOCTL_DIR flag
  *  - add FUSE_NOTIFY_DELETE
- *
- * 7.19
- *  - add FUSE_FALLOCATE
  */
 
 #ifndef _LINUX_FUSE_H
@@ -88,7 +85,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 19
+#define FUSE_KERNEL_MINOR_VERSION 18
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -281,7 +278,6 @@ enum fuse_opcode {
 	FUSE_POLL          = 40,
 	FUSE_NOTIFY_REPLY  = 41,
 	FUSE_BATCH_FORGET  = 42,
-	FUSE_FALLOCATE     = 43,
 
 	/* CUSE specific operations */
 	CUSE_INIT          = 4096,
@@ -575,14 +571,6 @@ struct fuse_notify_poll_wakeup_out {
 	__u64	kh;
 };
 
-struct fuse_fallocate_in {
-	__u64	fh;
-	__u64	offset;
-	__u64	length;
-	__u32	mode;
-	__u32	padding;
-};
-
 struct fuse_in_header {
 	__u32	len;
 	__u32	opcode;
diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h
index 9e65eff6af3b..e4baff5f7ff4 100644
--- a/trunk/include/linux/init_task.h
+++ b/trunk/include/linux/init_task.h
@@ -149,7 +149,6 @@ extern struct cred init_cred;
 	.normal_prio	= MAX_PRIO-20,					\
 	.policy		= SCHED_NORMAL,					\
 	.cpus_allowed	= CPU_MASK_ALL,					\
-	.nr_cpus_allowed= NR_CPUS,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.se		= {						\
@@ -158,6 +157,7 @@ extern struct cred init_cred;
 	.rt		= {						\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
 		.time_slice	= RR_TIMESLICE,				\
+		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	INIT_PUSHABLE_TASKS(tsk)					\
diff --git a/trunk/include/linux/radix-tree.h b/trunk/include/linux/radix-tree.h
index ffc444c38b0a..0d04cd69ab9b 100644
--- a/trunk/include/linux/radix-tree.h
+++ b/trunk/include/linux/radix-tree.h
@@ -368,11 +368,8 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 			iter->index++;
 			if (likely(*slot))
 				return slot;
-			if (flags & RADIX_TREE_ITER_CONTIG) {
-				/* forbid switching to the next chunk */
-				iter->next_index = 0;
+			if (flags & RADIX_TREE_ITER_CONTIG)
 				break;
-			}
 		}
 	}
 	return NULL;
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 6029d8c54476..f34437e835a7 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -145,7 +145,6 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(unsigned long ticks);
-extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -1188,6 +1187,7 @@ struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned long timeout;
 	unsigned int time_slice;
+	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1252,7 +1252,6 @@ struct task_struct {
 #endif
 
 	unsigned int policy;
-	int nr_cpus_allowed;
 	cpumask_t cpus_allowed;
 
 #ifdef CONFIG_PREEMPT_RCU
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index c46958e26121..39eb6011bc38 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -142,8 +142,9 @@ const_debug unsigned int sysctl_sched_features =
 #define SCHED_FEAT(name, enabled)	\
 	#name ,
 
-static const char * const sched_feat_names[] = {
+static __read_mostly char *sched_feat_names[] = {
 #include "features.h"
+	NULL
 };
 
 #undef SCHED_FEAT
@@ -2516,32 +2517,25 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 	sched_avg_update(this_rq);
 }
 
-#ifdef CONFIG_NO_HZ
-/*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we cannot use the delta approach from the regular tick since that
- * would seriously skew the load calculation. However we'll make do for those
- * updates happening while idle (nohz_idle_balance) or coming out of idle
- * (tick_nohz_idle_exit).
- *
- * This means we might still be one tick off for nohz periods.
- */
-
 /*
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
  */
 void update_idle_cpu_load(struct rq *this_rq)
 {
-	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+	unsigned long curr_jiffies = jiffies;
 	unsigned long load = this_rq->load.weight;
 	unsigned long pending_updates;
 
 	/*
-	 * bail if there's load or we're actually up-to-date.
+	 * Bloody broken means of dealing with nohz, but better than nothing..
+	 * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
+	 * update and see 0 difference the one time and 2 the next, even though
+	 * we ticked at roughly the same rate.
+	 *
+	 * Hence we only use this from nohz_idle_balance() and skip this
+	 * nonsense when called from the scheduler_tick() since that's
+	 * guaranteed a stable rate.
 	 */
 	if (load || curr_jiffies == this_rq->last_load_update_tick)
 		return;
@@ -2552,39 +2546,13 @@ void update_idle_cpu_load(struct rq *this_rq)
 	__update_cpu_load(this_rq, load, pending_updates);
 }
 
-/*
- * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
- */
-void update_cpu_load_nohz(void)
-{
-	struct rq *this_rq = this_rq();
-	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-	unsigned long pending_updates;
-
-	if (curr_jiffies == this_rq->last_load_update_tick)
-		return;
-
-	raw_spin_lock(&this_rq->lock);
-	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-	if (pending_updates) {
-		this_rq->last_load_update_tick = curr_jiffies;
-		/*
-		 * We were idle, this means load 0, the current load might be
-		 * !0 due to remote wakeups and the sort.
-		 */
-		__update_cpu_load(this_rq, 0, pending_updates);
-	}
-	raw_spin_unlock(&this_rq->lock);
-}
-#endif /* CONFIG_NO_HZ */
-
 /*
  * Called from scheduler_tick()
  */
 static void update_cpu_load_active(struct rq *this_rq)
 {
 	/*
-	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
+	 * See the mess in update_idle_cpu_load().
 	 */
 	this_rq->last_load_update_tick = jiffies;
 	__update_cpu_load(this_rq, this_rq->load.weight, 1);
@@ -5014,7 +4982,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = cpumask_weight(new_mask);
+	p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -6029,14 +5997,11 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		cpumask_or(covered, covered, sg_span);
 
-		sg->sgp = *per_cpu_ptr(sdd->sgp, i);
+		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
 		atomic_inc(&sg->sgp->ref);
 
-		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
-		    cpumask_first(sg_span) == cpu) {
-			WARN_ON_ONCE(!cpumask_test_cpu(cpu, sg_span));
+		if (cpumask_test_cpu(cpu, sg_span))
 			groups = sg;
-		}
 
 		if (!first)
 			first = sg;
@@ -6438,7 +6403,7 @@ static void sched_init_numa(void)
 			return;
 
 	for (j = 0; j < nr_node_ids; j++) {
-		struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
+		struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
 		if (!mask)
 			return;
 
@@ -6726,6 +6691,7 @@ static int init_sched_domains(const struct cpumask *cpu_map)
 	if (!doms_cur)
 		doms_cur = &fallback_doms;
 	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
+	dattr_cur = NULL;
 	err = build_sched_domains(doms_cur[0], NULL);
 	register_sched_domain_sysctl();
 
diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c
index b2a2d236f27b..940e6d17cf96 100644
--- a/trunk/kernel/sched/fair.c
+++ b/trunk/kernel/sched/fair.c
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
 
-	if (p->nr_cpus_allowed == 1)
+	if (p->rt.nr_cpus_allowed == 1)
 		return prev_cpu;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
@@ -3503,22 +3503,15 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 unsigned long scale_rt_power(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	u64 total, available, age_stamp, avg;
+	u64 total, available;
 
-	/*
-	 * Since we're reading these variables without serialization make sure
-	 * we read them once before doing sanity checks on them.
-	 */
-	age_stamp = ACCESS_ONCE(rq->age_stamp);
-	avg = ACCESS_ONCE(rq->rt_avg);
-
-	total = sched_avg_period() + (rq->clock - age_stamp);
+	total = sched_avg_period() + (rq->clock - rq->age_stamp);
 
-	if (unlikely(total < avg)) {
+	if (unlikely(total < rq->rt_avg)) {
 		/* Ensures that power won't end up being negative */
 		available = 0;
 	} else {
-		available = total - avg;
+		available = total - rq->rt_avg;
 	}
 
 	if (unlikely((s64)total < SCHED_POWER_SCALE))
@@ -3581,26 +3574,11 @@ void update_group_power(struct sched_domain *sd, int cpu)
 
 	power = 0;
 
-	if (child->flags & SD_OVERLAP) {
-		/*
-		 * SD_OVERLAP domains cannot assume that child groups
-		 * span the current group.
-		 */
-
-		for_each_cpu(cpu, sched_group_cpus(sdg))
-			power += power_of(cpu);
-	} else {
-		/*
-		 * !SD_OVERLAP domains can assume that child groups
-		 * span the current group.
-		 */
-
-		group = child->groups;
-		do {
-			power += group->sgp->power;
-			group = group->next;
-		} while (group != child->groups);
-	}
+	group = child->groups;
+	do {
+		power += group->sgp->power;
+		group = group->next;
+	} while (group != child->groups);
 
 	sdg->sgp->power = power;
 }
diff --git a/trunk/kernel/sched/rt.c b/trunk/kernel/sched/rt.c
index 2a4e8dffbd6b..c5565c3c515f 100644
--- a/trunk/kernel/sched/rt.c
+++ b/trunk/kernel/sched/rt.c
@@ -274,16 +274,13 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-	struct task_struct *p;
-
 	if (!rt_entity_is_task(rt_se))
 		return;
 
-	p = rt_task_of(rt_se);
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total++;
-	if (p->nr_cpus_allowed > 1)
+	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory++;
 
 	update_rt_migration(rt_rq);
@@ -291,16 +288,13 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-	struct task_struct *p;
-
 	if (!rt_entity_is_task(rt_se))
 		return;
 
-	p = rt_task_of(rt_se);
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total--;
-	if (p->nr_cpus_allowed > 1)
+	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory--;
 
 	update_rt_migration(rt_rq);
@@ -1167,7 +1161,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 
 	inc_nr_running(rq);
@@ -1231,7 +1225,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
 	cpu = task_cpu(p);
 
-	if (p->nr_cpus_allowed == 1)
+	if (p->rt.nr_cpus_allowed == 1)
 		goto out;
 
 	/* For anything but wake ups, just return the task_cpu */
@@ -1266,9 +1260,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 * will have to sort it out.
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (curr->rt.nr_cpus_allowed < 2 ||
 	     curr->prio <= p->prio) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
 		if (target != -1)
@@ -1282,10 +1276,10 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	if (rq->curr->nr_cpus_allowed == 1)
+	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (p->nr_cpus_allowed != 1
+	if (p->rt.nr_cpus_allowed != 1
 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
 		return;
 
@@ -1401,7 +1395,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1414,7 +1408,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
 	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-	    (p->nr_cpus_allowed > 1))
+	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
 }
@@ -1470,7 +1464,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (unlikely(!lowest_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
 
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1592,7 +1586,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
 	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
@@ -1799,9 +1793,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
 	    has_pushable_tasks(rq) &&
-	    p->nr_cpus_allowed > 1 &&
+	    p->rt.nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (rq->curr->rt.nr_cpus_allowed < 2 ||
 	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
@@ -1823,7 +1817,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	 * Only update if the process changes its state from whether it
 	 * can migrate or not.
 	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
+	if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
 		return;
 
 	rq = task_rq(p);
@@ -1985,8 +1979,6 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
-	struct sched_rt_entity *rt_se = &p->rt;
-
 	update_curr_rt(rq);
 
 	watchdog(rq, p);
@@ -2004,15 +1996,12 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	p->rt.time_slice = RR_TIMESLICE;
 
 	/*
-	 * Requeue to the end of queue if we (and all of our ancestors) are the
-	 * only element on the queue
+	 * Requeue to the end of queue if we are not the only element
+	 * on the queue:
 	 */
-	for_each_sched_rt_entity(rt_se) {
-		if (rt_se->run_list.prev != rt_se->run_list.next) {
-			requeue_task_rt(rq, p, 0);
-			set_tsk_need_resched(p);
-			return;
-		}
+	if (p->rt.run_list.prev != p->rt.run_list.next) {
+		requeue_task_rt(rq, p, 0);
+		set_tsk_need_resched(p);
 	}
 }
 
diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c
index da70c6db496c..efd386667536 100644
--- a/trunk/kernel/time/tick-sched.c
+++ b/trunk/kernel/time/tick-sched.c
@@ -576,7 +576,6 @@ void tick_nohz_idle_exit(void)
 	/* Update jiffies first */
 	select_nohz_load_balancer(0);
 	tick_do_update_jiffies64(now);
-	update_cpu_load_nohz();
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	/*
diff --git a/trunk/lib/radix-tree.c b/trunk/lib/radix-tree.c
index e7964296fd50..d7c878cc006c 100644
--- a/trunk/lib/radix-tree.c
+++ b/trunk/lib/radix-tree.c
@@ -686,9 +686,6 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 	 * during iterating; it can be zero only at the beginning.
 	 * And we cannot overflow iter->next_index in a single step,
 	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
-	 *
-	 * This condition also used by radix_tree_next_slot() to stop
-	 * contiguous iterating, and forbid swithing to the next chunk.
 	 */
 	index = iter->next_index;
 	if (!index && iter->index)
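
Editor's note, not part of the patch: among the changes above, this revert drops the fuse_squash_ino() helper from fs/fuse/inode.c, which XOR-folds the high 32 bits of a 64-bit FUSE inode number into the low 32 bits whenever ino_t is narrower than u64, rather than silently truncating. A minimal userspace sketch of the same fold, with uint32_t standing in for a 32-bit ino_t and squash_ino as a hypothetical local name mirroring the removed kernel helper:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ino32_t;	/* stand-in for a 32-bit ino_t */

/*
 * Fold a 64-bit inode number into 32 bits: XOR the high half into
 * the low half instead of discarding it, so inodes that differ only
 * in their high bits are still likely to map to distinct values.
 */
static ino32_t squash_ino(uint64_t ino64)
{
	ino32_t ino = (ino32_t) ino64;
	if (sizeof(ino32_t) < sizeof(uint64_t))
		ino ^= ino64 >> (sizeof(uint64_t) - sizeof(ino32_t)) * 8;
	return ino;
}

int main(void)
{
	/* two inodes that collide under plain truncation stay distinct */
	printf("%x\n", (unsigned) squash_ino(0x0000000100000042ULL)); /* 43 */
	printf("%x\n", (unsigned) squash_ino(0x0000000200000042ULL)); /* 40 */
	return 0;
}

Plain truncation would report 0x42 for both example inodes; the fold keeps them distinct, which matters to userspace tools that compare st_ino to detect hard links.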