Skip to content

Commit

Permalink
mm: replace init_page_accessed by __SetPageReferenced
Browse files Browse the repository at this point in the history
Do we really need an exported alias for __SetPageReferenced()? Its
callers better know what they're doing, in which case the page would not
be already marked referenced.  Kill init_page_accessed(), and just use
__SetPageReferenced() inline.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Prabhakar Lad <prabhakar.csengg@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Hugh Dickins authored and Linus Torvalds committed Aug 7, 2014
1 parent c2ea218 commit eb39d61
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 15 deletions.
1 change: 0 additions & 1 deletion include/linux/swap.h
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,6 @@ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void init_page_accessed(struct page *page);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
Expand Down
4 changes: 2 additions & 2 deletions mm/filemap.c
Original file line number Diff line number Diff line change
Expand Up @@ -1091,9 +1091,9 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
fgp_flags |= FGP_LOCK;

/* Init accessed so avoit atomic mark_page_accessed later */
/* Init accessed so avoid atomic mark_page_accessed later */
if (fgp_flags & FGP_ACCESSED)
init_page_accessed(page);
__SetPageReferenced(page);

err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
if (unlikely(err)) {
Expand Down
2 changes: 1 addition & 1 deletion mm/shmem.c
Original file line number Diff line number Diff line change
Expand Up @@ -1166,7 +1166,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
__SetPageSwapBacked(page);
__set_page_locked(page);
if (sgp == SGP_WRITE)
init_page_accessed(page);
__SetPageReferenced(page);

error = mem_cgroup_charge_file(page, current->mm,
gfp & GFP_RECLAIM_MASK);
Expand Down
14 changes: 3 additions & 11 deletions mm/swap.c
Original file line number Diff line number Diff line change
Expand Up @@ -589,6 +589,9 @@ static void __lru_cache_activate_page(struct page *page)
* inactive,unreferenced -> inactive,referenced
* inactive,referenced -> active,unreferenced
* active,unreferenced -> active,referenced
*
* When a newly allocated page is not yet visible, so safe for non-atomic ops,
* __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
*/
void mark_page_accessed(struct page *page)
{
Expand All @@ -614,17 +617,6 @@ void mark_page_accessed(struct page *page)
}
EXPORT_SYMBOL(mark_page_accessed);

/*
 * Like mark_page_accessed(), but for a page that is not yet visible to
 * other users, so it is still safe to use the non-atomic flag operation.
 */
void init_page_accessed(struct page *page)
{
if (!PageReferenced(page))
__SetPageReferenced(page);
}
EXPORT_SYMBOL(init_page_accessed);

static void __lru_cache_add(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
Expand Down

0 comments on commit eb39d61

Please sign in to comment.