diff --git a/[refs] b/[refs]
index 0eed93f8cf02..b6b42d70b725 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: bd453cd487ac7116a269517779b83c1061debbec
+refs/heads/master: 641cf4a668e9e69d2bc061e953422ff72a91f86e
diff --git a/trunk/include/linux/posix_acl.h b/trunk/include/linux/posix_acl.h
index 0cdba01b7756..c513466c7dc7 100644
--- a/trunk/include/linux/posix_acl.h
+++ b/trunk/include/linux/posix_acl.h
@@ -83,6 +83,7 @@ extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
 extern struct posix_acl *get_posix_acl(struct inode *, int);
 extern int set_posix_acl(struct inode *, int, struct posix_acl *);
 
+#ifdef CONFIG_FS_POSIX_ACL
 static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
 {
 	struct posix_acl **p, *acl;
@@ -146,5 +147,5 @@ static inline void forget_cached_acl(struct inode *inode, int type)
 	if (old != ACL_NOT_CACHED)
 		posix_acl_release(old);
 }
-
+#endif
 #endif	/* __LINUX_POSIX_ACL_H */
diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c
index 1c337112335c..80b5ce716596 100644
--- a/trunk/kernel/futex.c
+++ b/trunk/kernel/futex.c
@@ -284,25 +284,6 @@ void put_futex_key(int fshared, union futex_key *key)
 	drop_futex_key_refs(key);
 }
 
-/*
- * fault_in_user_writeable - fault in user address and verify RW access
- * @uaddr:	pointer to faulting user space address
- *
- * Slow path to fixup the fault we just took in the atomic write
- * access to @uaddr.
- *
- * We have no generic implementation of a non destructive write to the
- * user address. We know that we faulted in the atomic pagefault
- * disabled section so we can as well avoid the #PF overhead by
- * calling get_user_pages() right away.
- */
-static int fault_in_user_writeable(u32 __user *uaddr)
-{
-	int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
-				 sizeof(*uaddr), 1, 0, NULL, NULL);
-	return ret < 0 ? ret : 0;
-}
-
 /**
  * futex_top_waiter() - Return the highest priority waiter on a futex
  * @hb:	the hash bucket the futex_q's reside in
@@ -915,6 +896,7 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 retry_private:
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
+		u32 dummy;
 
 		double_unlock_hb(hb1, hb2);
 
@@ -932,7 +914,7 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 			goto out_put_keys;
 		}
 
-		ret = fault_in_user_writeable(uaddr2);
+		ret = get_user(dummy, uaddr2);
 		if (ret)
 			goto out_put_keys;
 
@@ -1222,7 +1204,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 			double_unlock_hb(hb1, hb2);
 			put_futex_key(fshared, &key2);
 			put_futex_key(fshared, &key1);
-			ret = fault_in_user_writeable(uaddr2);
+			ret = get_user(curval2, uaddr2);
 			if (!ret)
 				goto retry;
 			goto out;
@@ -1500,7 +1482,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 handle_fault:
 	spin_unlock(q->lock_ptr);
 
-	ret = fault_in_user_writeable(uaddr);
+	ret = get_user(uval, uaddr);
 
 	spin_lock(q->lock_ptr);
 
@@ -1825,6 +1807,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct futex_hash_bucket *hb;
+	u32 uval;
 	struct futex_q q;
 	int res, ret;
 
@@ -1926,9 +1909,16 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 		return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
 uaddr_faulted:
+	/*
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
+	 */
 	queue_unlock(&q, hb);
 
-	ret = fault_in_user_writeable(uaddr);
+	ret = get_user(uval, uaddr);
 	if (ret)
 		goto out_put_key;
 
@@ -2023,10 +2013,17 @@ static int futex_unlock_pi(u32 __user *uaddr, int fshared)
 	return ret;
 
 pi_faulted:
+	/*
+	 * We have to r/w *(int __user *)uaddr, and we have to modify it
+	 * atomically. Therefore, if we continue to fault after get_user()
+	 * below, we need to handle the fault ourselves, while still holding
+	 * the mmap_sem. This can occur if the uaddr is under contention as
+	 * we have to drop the mmap_sem in order to call get_user().
+	 */
 	spin_unlock(&hb->lock);
 	put_futex_key(fshared, &key);
 
-	ret = fault_in_user_writeable(uaddr);
+	ret = get_user(uval, uaddr);
 	if (!ret)
 		goto retry;
 
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index 5d714f8fb303..aecc9cdfdfce 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		 * properly detect and handle allocation failures.
 		 *
 		 * We most definitely don't want callers attempting to
-		 * allocate greater than order-1 page units with
+		 * allocate greater than single-page units with
 		 * __GFP_NOFAIL.
 		 */
-		WARN_ON_ONCE(order > 1);
+		WARN_ON_ONCE(order > 0);
 	}
 	spin_lock_irqsave(&zone->lock, flags);
 	page = __rmqueue(zone, order, migratetype);
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 819f056b39c6..ce62b770e2fc 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -1085,17 +1085,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
-	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	/*
-	 * Let the initial higher-order allocation fail under memory pressure
-	 * so we fall-back to the minimum order allocation.
-	 */
-	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
-
-	page = alloc_slab_page(alloc_gfp, node, oo);
+	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
+									oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*