Skip to content

Commit

Permalink
mm/swap: Add folio_mark_accessed()
Browse files Browse the repository at this point in the history
Convert mark_page_accessed() to folio_mark_accessed().  It already
operated on the entire compound page, but now we can avoid calling
compound_head quite so many times.  Shrinks the function from 424 bytes
to 295 bytes (shrinking by 129 bytes).  The compatibility wrapper is 30
bytes, plus the 8 bytes for the exported symbol means the kernel shrinks
by 91 bytes.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
  • Loading branch information
Matthew Wilcox (Oracle) committed Oct 18, 2021
1 parent f2d2739 commit 76580b6
Show file tree
Hide file tree
Showing 3 changed files with 25 additions and 19 deletions.
3 changes: 2 additions & 1 deletion include/linux/swap.h
Original file line number Diff line number Diff line change
Expand Up @@ -352,7 +352,8 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file,
unsigned int nr_pages);
extern void lru_note_cost_page(struct page *);
extern void lru_cache_add(struct page *);
extern void mark_page_accessed(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

Expand Down
7 changes: 7 additions & 0 deletions mm/folio-compat.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
*/

#include <linux/pagemap.h>
#include <linux/swap.h>

struct address_space *page_mapping(struct page *page)
{
Expand Down Expand Up @@ -41,3 +42,9 @@ bool page_mapped(struct page *page)
return folio_mapped(page_folio(page));
}
EXPORT_SYMBOL(page_mapped);

/* Compatibility wrapper: resolve the page to its folio, then delegate. */
void mark_page_accessed(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_mark_accessed(folio);
}
EXPORT_SYMBOL(mark_page_accessed);
34 changes: 16 additions & 18 deletions mm/swap.c
Original file line number Diff line number Diff line change
Expand Up @@ -368,7 +368,7 @@ static void folio_activate(struct folio *folio)
}
#endif

static void __lru_cache_activate_page(struct page *page)
static void __lru_cache_activate_folio(struct folio *folio)
{
struct pagevec *pvec;
int i;
Expand All @@ -389,8 +389,8 @@ static void __lru_cache_activate_page(struct page *page)
for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
struct page *pagevec_page = pvec->pages[i];

if (pagevec_page == page) {
SetPageActive(page);
if (pagevec_page == &folio->page) {
folio_set_active(folio);
break;
}
}
Expand All @@ -408,36 +408,34 @@ static void __lru_cache_activate_page(struct page *page)
* When a newly allocated page is not yet visible, so safe for non-atomic ops,
* __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
*/
void mark_page_accessed(struct page *page)
void folio_mark_accessed(struct folio *folio)
{
page = compound_head(page);

if (!PageReferenced(page)) {
SetPageReferenced(page);
} else if (PageUnevictable(page)) {
if (!folio_test_referenced(folio)) {
folio_set_referenced(folio);
} else if (folio_test_unevictable(folio)) {
/*
 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
 * this list is never rotated or maintained, so marking an
 * unevictable page accessed has no effect.
 */
} else if (!PageActive(page)) {
} else if (!folio_test_active(folio)) {
/*
* If the page is on the LRU, queue it for activation via
* lru_pvecs.activate_page. Otherwise, assume the page is on a
* pagevec, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
if (PageLRU(page))
folio_activate(page_folio(page));
if (folio_test_lru(folio))
folio_activate(folio);
else
__lru_cache_activate_page(page);
ClearPageReferenced(page);
workingset_activation(page_folio(page));
__lru_cache_activate_folio(folio);
folio_clear_referenced(folio);
workingset_activation(folio);
}
if (page_is_idle(page))
clear_page_idle(page);
if (folio_test_idle(folio))
folio_clear_idle(folio);
}
EXPORT_SYMBOL(mark_page_accessed);
EXPORT_SYMBOL(folio_mark_accessed);

/**
* lru_cache_add - add a page to a page list
Expand Down

0 comments on commit 76580b6

Please sign in to comment.