From 84345c7f44189ebfa37387f09e348b19beabf657 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Wed, 11 Jul 2012 14:02:47 -0700
Subject: [PATCH]

--- yaml ---
r: 311838
b: refs/heads/master
c: d189922862e03ce6c7adc1e99d3b94e632dc8e89
h: refs/heads/master
v: v3
---
 [refs]           |  2 +-
 trunk/mm/shmem.c | 41 +++++++++++++++++++++++++++++------------
 2 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/[refs] b/[refs]
index 639f5e5aa879..3f813705baca 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f21f8062201fc6361f65de92e758a76375ba8c59
+refs/heads/master: d189922862e03ce6c7adc1e99d3b94e632dc8e89
diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c
index 3f696f7d9bac..294364a24a1f 100644
--- a/trunk/mm/shmem.c
+++ b/trunk/mm/shmem.c
@@ -263,6 +263,24 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
 	return 0;
 }
 
+/*
+ * Sometimes, before we decide whether to proceed or to fail, we must check
+ * that an entry was not already brought back from swap by a racing thread.
+ *
+ * Checking page is not enough: by the time a SwapCache page is locked, it
+ * might be reused, and again be SwapCache, using the same swap as before.
+ */
+static bool shmem_confirm_swap(struct address_space *mapping,
+			       pgoff_t index, swp_entry_t swap)
+{
+	void *item;
+
+	rcu_read_lock();
+	item = radix_tree_lookup(&mapping->page_tree, index);
+	rcu_read_unlock();
+	return item == swp_to_radix_entry(swap);
+}
+
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
@@ -1124,9 +1142,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		/* We have to do this with page locked to prevent races */
 		lock_page(page);
 		if (!PageSwapCache(page) || page_private(page) != swap.val ||
-		    page->mapping) {
+		    !shmem_confirm_swap(mapping, index, swap)) {
 			error = -EEXIST;	/* try again */
-			goto failed;
+			goto unlock;
 		}
 		if (!PageUptodate(page)) {
 			error = -EIO;
@@ -1142,9 +1160,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 
 		error = mem_cgroup_cache_charge(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
-		if (!error)
+		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 						gfp, swp_to_radix_entry(swap));
+			/* We already confirmed swap, and make no allocation */
+			VM_BUG_ON(error);
+		}
 		if (error)
 			goto failed;
 
@@ -1245,14 +1266,10 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 unacct:
 	shmem_unacct_blocks(info->flags, 1);
 failed:
-	if (swap.val && error != -EINVAL) {
-		struct page *test = find_get_page(mapping, index);
-		if (test && !radix_tree_exceptional_entry(test))
-			page_cache_release(test);
-		/* Have another try if the entry has changed */
-		if (test != swp_to_radix_entry(swap))
-			error = -EEXIST;
-	}
+	if (swap.val && error != -EINVAL &&
+	    !shmem_confirm_swap(mapping, index, swap))
+		error = -EEXIST;
+unlock:
 	if (page) {
 		unlock_page(page);
 		page_cache_release(page);
@@ -1264,7 +1281,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		spin_unlock(&info->lock);
 		goto repeat;
 	}
-	if (error == -EEXIST)
+	if (error == -EEXIST)	/* from above or from radix_tree_insert */
 		goto repeat;
 	return error;
 }
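
Note for readers outside the kernel tree: below is a minimal, hypothetical
userspace sketch of the confirm-before-commit pattern that
shmem_confirm_swap() implements above: re-read the mapping slot just before
committing, and report -EEXIST (try again) if it no longer holds the swap
entry you started from. The names (slot_table, confirm_swap) are invented
for illustration, and a C11 atomic load stands in for the kernel's
radix_tree_lookup() under rcu_read_lock(); this is a sketch of the idea,
not kernel code.

	/*
	 * Hypothetical userspace analogue of shmem_confirm_swap().
	 * A plain atomic load replaces the RCU-protected radix tree
	 * lookup the kernel uses; all names here are invented.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NSLOTS 8

	/* Each slot holds 0 (empty) or an encoded swap entry value. */
	static _Atomic uintptr_t slot_table[NSLOTS];

	/* Re-check: does the slot still hold the entry we started from? */
	static bool confirm_swap(size_t index, uintptr_t expected_entry)
	{
		/* In the kernel this is radix_tree_lookup() under RCU. */
		return atomic_load(&slot_table[index]) == expected_entry;
	}

	int main(void)
	{
		const size_t index = 3;
		const uintptr_t swap_entry = 0x2a;

		atomic_store(&slot_table[index], swap_entry);

		/*
		 * ... swap-in work happens here; meanwhile a racing thread
		 * may bring the entry back and repopulate the slot ...
		 */
		atomic_store(&slot_table[index], 0x99);	/* simulated race */

		if (!confirm_swap(index, swap_entry))
			printf("entry changed: fail with -EEXIST, retry\n");
		else
			printf("entry confirmed: safe to commit\n");
		return 0;
	}

As the comment added by the patch explains, checking the page itself is not
enough: by the time a SwapCache page is locked it may have been freed and
reused, and be SwapCache again with the same swap slot, so only a fresh
lookup of the mapping slot proves the original lookup is still current.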