diff --git a/[refs] b/[refs]
index 778c412109d5..4307dd51dddb 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: da1b001a2a2a9cb323e58327ed07ac4a5e6d96ea
+refs/heads/master: e3c1620434ac77b618ce74c024ace3559602ac99
diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c
index 6e4ea6d87774..37f72ee5bf7c 100644
--- a/trunk/fs/dcache.c
+++ b/trunk/fs/dcache.c
@@ -2213,15 +2213,14 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry,
  * The hash value has to match the hash queue that the dentry is on..
  */
 /*
- * __d_move - move a dentry
+ * d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way. Caller hold
- * rename_lock.
+ * dcache entries should not be moved in this way.
  */
-static void __d_move(struct dentry * dentry, struct dentry * target)
+void d_move(struct dentry * dentry, struct dentry * target)
 {
 	if (!dentry->d_inode)
 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
@@ -2229,6 +2228,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
 	BUG_ON(d_ancestor(dentry, target));
 	BUG_ON(d_ancestor(target, dentry));
 
+	write_seqlock(&rename_lock);
+
 	dentry_lock_for_move(dentry, target);
 
 	write_seqcount_begin(&dentry->d_seq);
@@ -2274,20 +2275,6 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
 	spin_unlock(&target->d_lock);
 	fsnotify_d_move(dentry);
 	spin_unlock(&dentry->d_lock);
-}
-
-/*
- * d_move - move a dentry
- * @dentry: entry to move
- * @target: new dentry
- *
- * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
- */
-void d_move(struct dentry *dentry, struct dentry *target)
-{
-	write_seqlock(&rename_lock);
-	__d_move(dentry, target);
 	write_sequnlock(&rename_lock);
 }
 EXPORT_SYMBOL(d_move);
@@ -2315,7 +2302,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
+ * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
@@ -2330,6 +2317,11 @@ static struct dentry *__d_unalias(struct inode *inode,
 	if (alias->d_parent == dentry->d_parent)
 		goto out_unalias;
 
+	/* Check for loops */
+	ret = ERR_PTR(-ELOOP);
+	if (d_ancestor(alias, dentry))
+		goto out_err;
+
 	/* See lock_rename() */
 	ret = ERR_PTR(-EBUSY);
 	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
@@ -2339,7 +2331,7 @@ static struct dentry *__d_unalias(struct inode *inode,
 		goto out_err;
 	m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-	__d_move(alias, dentry);
+	d_move(alias, dentry);
 	ret = alias;
 out_err:
 	spin_unlock(&inode->i_lock);
@@ -2424,24 +2416,15 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
 		alias = __d_find_alias(inode, 0);
 		if (alias) {
 			actual = alias;
-			write_seqlock(&rename_lock);
-
-			if (d_ancestor(alias, dentry)) {
-				/* Check for loops */
-				actual = ERR_PTR(-ELOOP);
-			} else if (IS_ROOT(alias)) {
-				/* Is this an anonymous mountpoint that we
-				 * could splice into our tree? */
+			/* Is this an anonymous mountpoint that we could splice
+			 * into our tree? */
+			if (IS_ROOT(alias)) {
 				__d_materialise_dentry(dentry, alias);
-				write_sequnlock(&rename_lock);
 				__d_drop(alias);
 				goto found;
-			} else {
-				/* Nope, but we must(!) avoid directory
-				 * aliasing */
-				actual = __d_unalias(inode, dentry, alias);
 			}
-			write_sequnlock(&rename_lock);
+			/* Nope, but we must(!) avoid directory aliasing */
+			actual = __d_unalias(inode, dentry, alias);
 			if (IS_ERR(actual))
 				dput(alias);
 			goto out_nolock;
diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c
index 5c867dd1c0b3..0223c41fb114 100644
--- a/trunk/fs/namei.c
+++ b/trunk/fs/namei.c
@@ -433,8 +433,6 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
 			goto err_parent;
 		BUG_ON(nd->inode != parent->d_inode);
 	} else {
-		if (dentry->d_parent != parent)
-			goto err_parent;
 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		if (!__d_rcu_to_refcount(dentry, nd->seq))
 			goto err_child;
diff --git a/trunk/include/drm/drm_pciids.h b/trunk/include/drm/drm_pciids.h
index e08f344c6cff..3d53efd25ab9 100644
--- a/trunk/include/drm/drm_pciids.h
+++ b/trunk/include/drm/drm_pciids.h
@@ -182,6 +182,7 @@
 	{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
@@ -192,6 +193,7 @@
 	{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c
index ba06207b1dd3..7e59ffb3d0ba 100644
--- a/trunk/kernel/rcutree.c
+++ b/trunk/kernel/rcutree.c
@@ -84,32 +84,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
 
-/*
- * The rcu_scheduler_active variable transitions from zero to one just
- * before the first task is spawned. So when this variable is zero, RCU
- * can assume that there is but one task, allowing RCU to (for example)
- * optimized synchronize_sched() to a simple barrier(). When this variable
- * is one, RCU must actually do all the hard work required to detect real
- * grace periods. This variable is also used to suppress boot-time false
- * positives from lockdep-RCU error checking.
- */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
-/*
- * The rcu_scheduler_fully_active variable transitions from zero to one
- * during the early_initcall() processing, which is after the scheduler
- * is capable of creating new tasks. So RCU processing (for example,
- * creating tasks for RCU priority boosting) must be delayed until after
- * rcu_scheduler_fully_active transitions from zero to one. We also
- * currently delay invocation of any RCU callbacks until after this point.
- *
- * It might later prove better for people registering RCU callbacks during
- * early boot to take responsibility for these callbacks, but one step at
- * a time.
- */
-static int rcu_scheduler_fully_active __read_mostly;
-
 #ifdef CONFIG_RCU_BOOST
 
 /*
@@ -121,6 +98,7 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
+static char rcu_kthreads_spawnable;
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
@@ -1489,8 +1467,6 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
-		return;
 	if (likely(!rsp->boost)) {
 		rcu_do_batch(rsp, rdp);
 		return;
diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h
index 75113cb7c4fb..14dc7dd00902 100644
--- a/trunk/kernel/rcutree_plugin.h
+++ b/trunk/kernel/rcutree_plugin.h
@@ -1532,7 +1532,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_scheduler_fully_active ||
+	if (!rcu_kthreads_spawnable ||
 	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
 		return 0;
 	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1639,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	struct sched_param sp;
 	struct task_struct *t;
 
-	if (!rcu_scheduler_fully_active ||
+	if (!rcu_kthreads_spawnable ||
 	    rnp->qsmaskinit == 0)
 		return 0;
 	if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1665,7 @@ static int __init rcu_spawn_kthreads(void)
 	int cpu;
 	struct rcu_node *rnp;
 
-	rcu_scheduler_fully_active = 1;
+	rcu_kthreads_spawnable = 1;
 	for_each_possible_cpu(cpu) {
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 		if (cpu_online(cpu))
@@ -1687,7 +1687,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_scheduler_fully_active) {
+	if (rcu_kthreads_spawnable) {
 		(void)rcu_spawn_one_cpu_kthread(cpu);
 		if (rnp->node_kthread_task == NULL)
 			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,13 +1726,6 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
-static int __init rcu_scheduler_really_started(void)
-{
-	rcu_scheduler_fully_active = 1;
-	return 0;
-}
-early_initcall(rcu_scheduler_really_started);
-
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 3dc716f6d8ad..9769c756ad66 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -7757,9 +7757,6 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
-#ifndef CONFIG_64BIT
-	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
-#endif
 }
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)