shmem: cleanup shmem_add_to_page_cache

shmem_add_to_page_cache() has three callsites, but only one of them wants
the radix_tree_preload() (an exceptional entry guarantees that the radix
tree node is already present in the other cases), and only that site
needs the mem_cgroup_uncharge_cache_page() on failure (PageSwapCache
makes it a no-op in the other cases).  We did it this way originally to
mirror add_to_page_cache_locked(); but it's confusing now, so move the
radix_tree preloading and the mem_cgroup uncharging out to that one
caller.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Hugh Dickins authored and Linus Torvalds committed Jul 11, 2012
1 parent d189922 commit b065b43
Showing 1 changed file with 28 additions and 30 deletions: mm/shmem.c
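
As a primer before the diff: the preload idiom the message refers to has
the following shape (a minimal sketch of the era's radix-tree API, using
the names that appear in the diff below; it is not code taken from this
commit).  radix_tree_preload() pre-allocates tree nodes while sleeping
allocations are still permitted, so the insertion done later under the
spinlock cannot fail for lack of memory.

	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); /* may sleep */
	if (!error) {
		/* preemption is disabled from a successful preload ... */
		spin_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, index, page);
		spin_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();	/* ... until here */
	}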
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -288,40 +288,31 @@ static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
 				   pgoff_t index, gfp_t gfp, void *expected)
 {
-	int error = 0;
+	int error;
 
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapBacked(page));
 
+	page_cache_get(page);
+	page->mapping = mapping;
+	page->index = index;
+
+	spin_lock_irq(&mapping->tree_lock);
 	if (!expected)
-		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		error = radix_tree_insert(&mapping->page_tree, index, page);
+	else
+		error = shmem_radix_tree_replace(mapping, index, expected,
+								 page);
 	if (!error) {
-		page_cache_get(page);
-		page->mapping = mapping;
-		page->index = index;
-
-		spin_lock_irq(&mapping->tree_lock);
-		if (!expected)
-			error = radix_tree_insert(&mapping->page_tree,
-							index, page);
-		else
-			error = shmem_radix_tree_replace(mapping, index,
-							expected, page);
-		if (!error) {
-			mapping->nrpages++;
-			__inc_zone_page_state(page, NR_FILE_PAGES);
-			__inc_zone_page_state(page, NR_SHMEM);
-			spin_unlock_irq(&mapping->tree_lock);
-		} else {
-			page->mapping = NULL;
-			spin_unlock_irq(&mapping->tree_lock);
-			page_cache_release(page);
-		}
-		if (!expected)
-			radix_tree_preload_end();
+		mapping->nrpages++;
+		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_zone_page_state(page, NR_SHMEM);
+		spin_unlock_irq(&mapping->tree_lock);
+	} else {
+		page->mapping = NULL;
+		spin_unlock_irq(&mapping->tree_lock);
+		page_cache_release(page);
 	}
-	if (error)
-		mem_cgroup_uncharge_cache_page(page);
 	return error;
 }
 
@@ -1202,11 +1193,18 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		__set_page_locked(page);
 		error = mem_cgroup_cache_charge(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
-		if (!error)
-			error = shmem_add_to_page_cache(page, mapping, index,
-								gfp, NULL);
 		if (error)
 			goto decused;
+		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		if (!error) {
+			error = shmem_add_to_page_cache(page, mapping, index,
+							gfp, NULL);
+			radix_tree_preload_end();
+		}
+		if (error) {
+			mem_cgroup_uncharge_cache_page(page);
+			goto decused;
+		}
 		lru_cache_add_anon(page);
 
 		spin_lock(&info->lock);
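
For reference, shmem_add_to_page_cache() as it reads after this commit,
reconstructed from the context and '+' lines of the first hunk above (no
new code beyond what the diff shows):

	static int shmem_add_to_page_cache(struct page *page,
					   struct address_space *mapping,
					   pgoff_t index, gfp_t gfp, void *expected)
	{
		int error;

		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* Take a page-cache reference and bind the page first */
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree, index, page);
		else
			error = shmem_radix_tree_replace(mapping, index, expected,
									 page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			/* Undo the binding and the reference on failure */
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		return error;
	}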