Commit 73ac2c6

---
r: 154300
b: refs/heads/master
c: c622304
h: refs/heads/master
v: v3
Linus Torvalds committed Jun 24, 2009
1 parent efb1493 commit 73ac2c6
Showing 9 changed files with 56 additions and 42 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 3a6a6c16be78472a52f6dd7d88913373b42ad0f7
refs/heads/master: c6223048259006759237d826219f0fa4f312fb47
6 changes: 2 additions & 4 deletions trunk/fs/btrfs/inode.c
@@ -2122,10 +2122,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
if (!maybe_acls) {
inode->i_acl = NULL;
inode->i_default_acl = NULL;
}
if (!maybe_acls)
cache_no_acl(inode);

BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
alloc_group_block, 0);
3 changes: 1 addition & 2 deletions trunk/fs/jffs2/acl.c
@@ -284,8 +284,7 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
struct posix_acl *acl, *clone;
int rc;

inode->i_default_acl = NULL;
inode->i_acl = NULL;
cache_no_acl(inode);

if (S_ISLNK(*i_mode))
return 0; /* Symlink always has no-ACL */
13 changes: 7 additions & 6 deletions trunk/fs/jfs/acl.c
@@ -118,15 +118,16 @@ static int jfs_set_acl(tid_t tid, struct inode *inode, int type,

static int jfs_check_acl(struct inode *inode, int mask)
{
if (inode->i_acl == ACL_NOT_CACHED) {
struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);

if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
int error = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
return error;
}

if (inode->i_acl)
return posix_acl_permission(inode, inode->i_acl, mask);
return -EAGAIN;
}

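A note on the hunk above: the open-coded inode->i_acl check can go because ACL caching is now handled by the generic helpers in <linux/posix_acl.h>; a filesystem's get-ACL routine is expected to consult that cache itself, so calling it unconditionally is already cheap when the ACL is cached. A minimal sketch of that pattern, using a hypothetical examplefs rather than the real jfs code, and assuming the set_cached_acl() helper from the same header:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/posix_acl.h>

/* Hypothetical on-disk reader; stands in for jfs's xattr-based lookup. */
static struct posix_acl *examplefs_read_acl_from_disk(struct inode *inode, int type);

/* Sketch only: examplefs_get_acl() is illustrative, not the real jfs code. */
static struct posix_acl *examplefs_get_acl(struct inode *inode, int type)
{
        struct posix_acl *acl = get_cached_acl(inode, type);

        if (acl != ACL_NOT_CACHED)
                return acl;             /* fast path: cached (may be NULL) */

        acl = examplefs_read_acl_from_disk(inode, type);
        if (!IS_ERR(acl))
                set_cached_acl(inode, type, acl);       /* cache takes its own reference */
        return acl;
}
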
10 changes: 10 additions & 0 deletions trunk/include/linux/posix_acl.h
@@ -83,6 +83,7 @@ extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
extern int set_posix_acl(struct inode *, int, struct posix_acl *);

#ifdef CONFIG_FS_POSIX_ACL
static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
{
struct posix_acl **p, *acl;
@@ -146,5 +147,14 @@ static inline void forget_cached_acl(struct inode *inode, int type)
if (old != ACL_NOT_CACHED)
posix_acl_release(old);
}
#endif

static inline void cache_no_acl(struct inode *inode)
{
#ifdef CONFIG_FS_POSIX_ACL
inode->i_acl = NULL;
inode->i_default_acl = NULL;
#endif
}

#endif /* __LINUX_POSIX_ACL_H */
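
The new cache_no_acl() helper above gives a filesystem a single call to record "this inode has no ACLs", replacing the open-coded NULL assignments removed from btrfs and jffs2 earlier in this commit, and it compiles away when CONFIG_FS_POSIX_ACL is not set. A minimal usage sketch with a hypothetical examplefs inode-read path (the real call sites are the btrfs, jffs2 and shmem hunks in this commit):

#include <linux/fs.h>
#include <linux/posix_acl.h>

/* Hypothetical: examplefs_read_inode() and its has_acl_xattrs flag are
 * illustrative only. */
static void examplefs_read_inode(struct inode *inode, bool has_acl_xattrs)
{
        /* ... fill in i_mode, i_size, etc. from the on-disk inode ... */

        if (!has_acl_xattrs)
                /* No ACLs on disk: record that in the generic cache so later
                 * lookups see NULL rather than ACL_NOT_CACHED. */
                cache_no_acl(inode);
}
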
45 changes: 24 additions & 21 deletions trunk/kernel/futex.c
@@ -284,6 +284,25 @@ void put_futex_key(int fshared, union futex_key *key)
drop_futex_key_refs(key);
}

/*
* fault_in_user_writeable - fault in user address and verify RW access
* @uaddr: pointer to faulting user space address
*
* Slow path to fixup the fault we just took in the atomic write
* access to @uaddr.
*
* We have no generic implementation of a non destructive write to the
* user address. We know that we faulted in the atomic pagefault
* disabled section so we can as well avoid the #PF overhead by
* calling get_user_pages() right away.
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
sizeof(*uaddr), 1, 0, NULL, NULL);
return ret < 0 ? ret : 0;
}

/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
@@ -896,7 +915,6 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
retry_private:
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
u32 dummy;

double_unlock_hb(hb1, hb2);

@@ -914,7 +932,7 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
goto out_put_keys;
}

ret = get_user(dummy, uaddr2);
ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;

@@ -1204,7 +1222,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
double_unlock_hb(hb1, hb2);
put_futex_key(fshared, &key2);
put_futex_key(fshared, &key1);
ret = get_user(curval2, uaddr2);
ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
@@ -1482,7 +1500,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
handle_fault:
spin_unlock(q->lock_ptr);

ret = get_user(uval, uaddr);
ret = fault_in_user_writeable(uaddr);

spin_lock(q->lock_ptr);

@@ -1807,7 +1825,6 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_hash_bucket *hb;
u32 uval;
struct futex_q q;
int res, ret;

@@ -1909,16 +1926,9 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
/*
* We have to r/w *(int __user *)uaddr, and we have to modify it
* atomically. Therefore, if we continue to fault after get_user()
* below, we need to handle the fault ourselves, while still holding
* the mmap_sem. This can occur if the uaddr is under contention as
* we have to drop the mmap_sem in order to call get_user().
*/
queue_unlock(&q, hb);

ret = get_user(uval, uaddr);
ret = fault_in_user_writeable(uaddr);
if (ret)
goto out_put_key;

@@ -2013,17 +2023,10 @@ static int futex_unlock_pi(u32 __user *uaddr, int fshared)
return ret;

pi_faulted:
/*
* We have to r/w *(int __user *)uaddr, and we have to modify it
* atomically. Therefore, if we continue to fault after get_user()
* below, we need to handle the fault ourselves, while still holding
* the mmap_sem. This can occur if the uaddr is under contention as
* we have to drop the mmap_sem in order to call get_user().
*/
spin_unlock(&hb->lock);
put_futex_key(fshared, &key);

ret = get_user(uval, uaddr);
ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;

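The converted futex call sites above all share one shape: attempt the futex word access atomically (page faults are disabled around it), and when that fails, drop the locks, fault the page in with write permission via the new helper, and retry. A condensed, illustrative sketch of that retry pattern; futex_op_sketch() is not a real kernel function, and locking and futex-key handling are elided:

/* Illustrative only: condenses the retry shape used by futex_wake_op(),
 * futex_requeue(), futex_lock_pi() and futex_unlock_pi() above. */
static int futex_op_sketch(u32 __user *uaddr, int op)
{
        int ret;

retry:
        /* Fast path: atomic user-space access; it fails instead of sleeping
         * when the page is not present or not writable. */
        ret = futex_atomic_op_inuser(op, uaddr);
        if (unlikely(ret < 0)) {
                /* Slow path: fault the page in writably. A plain get_user()
                 * only guarantees read access, which is why it is replaced by
                 * fault_in_user_writeable() in this commit. */
                ret = fault_in_user_writeable(uaddr);
                if (ret)
                        return ret;
                goto retry;
        }
        return ret;
}
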
4 changes: 2 additions & 2 deletions trunk/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
* properly detect and handle allocation failures.
*
* We most definitely don't want callers attempting to
* allocate greater than single-page units with
* allocate greater than order-1 page units with
* __GFP_NOFAIL.
*/
WARN_ON_ONCE(order > 0);
WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
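
The relaxed check above tolerates order-1 (two contiguous pages) __GFP_NOFAIL allocations while still flagging anything larger. Purely for illustration, not from the commit:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustration only: after this change, order 1 no longer trips the
 * WARN_ON_ONCE() in buffered_rmqueue(); order 2 and above still does. */
static void nofail_order_demo(void)
{
        struct page *two_pages  = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 1); /* tolerated */
        struct page *four_pages = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 2); /* warns once */

        __free_pages(two_pages, 1);
        __free_pages(four_pages, 2);
}
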
5 changes: 1 addition & 4 deletions trunk/mm/shmem.c
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
cache_no_acl(inode);

switch (mode & S_IFMT) {
default:
@@ -2379,10 +2380,6 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
if (!p)
return NULL;
#ifdef CONFIG_TMPFS_POSIX_ACL
p->vfs_inode.i_acl = NULL;
p->vfs_inode.i_default_acl = NULL;
#endif
return &p->vfs_inode;
}

10 changes: 8 additions & 2 deletions trunk/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;

flags |= s->allocflags;

page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
oo);
/*
* Let the initial higher-order allocation fail under memory pressure
* so we fall-back to the minimum order allocation.
*/
alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
/*
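
The slub change above lets the speculative higher-order slab allocation fail fast (no warning, no retries, and __GFP_NOFAIL stripped) so that the code can fall back to the minimum order. A standalone sketch of that two-step strategy, using illustrative names rather than the real slub internals:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch of the fallback strategy allocate_slab() uses after this change;
 * alloc_with_fallback(), hi_order and min_order are illustrative names. */
static struct page *alloc_with_fallback(gfp_t flags, int hi_order, int min_order)
{
        /* First attempt: preferred (higher) order, allowed to fail quickly. */
        gfp_t alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
        struct page *page = alloc_pages(alloc_gfp, hi_order);

        if (page)
                return page;

        /* Fallback: minimum order, with the caller's original flags
         * (including __GFP_NOFAIL if it was set). */
        return alloc_pages(flags, min_order);
}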
