Skip to content

Commit

Permalink
mm/rmap: Add folio_mkclean()
Browse files Browse the repository at this point in the history
Transform page_mkclean() into folio_mkclean() and add a page_mkclean()
wrapper around folio_mkclean().

folio_mkclean() is 15 bytes smaller than page_mkclean(), but the kernel
is enlarged by 33 bytes due to inlining page_folio() into each caller.
This will go away once the callers are converted to use folio_mkclean().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
  • Loading branch information
Matthew Wilcox (Oracle) committed Oct 18, 2021
1 parent 76580b6 commit d9c08e2
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 10 deletions.
10 changes: 6 additions & 4 deletions include/linux/rmap.h
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*
* returns the number of cleaned PTEs.
*/
int page_mkclean(struct page *);
int folio_mkclean(struct folio *);

/*
* called in munlock()/munmap() path to check for other vmas holding
Expand Down Expand Up @@ -295,12 +295,14 @@ static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
{
}

static inline int page_mkclean(struct page *page)
/*
 * !CONFIG_MMU stub: without an MMU there are no PTEs to write-protect,
 * so there is nothing to clean — report zero cleaned PTEs.
 */
static inline int folio_mkclean(struct folio *folio)
{
return 0;
}


#endif /* CONFIG_MMU */

/*
 * Legacy wrapper: look up the folio containing @page and clean that.
 * Returns the number of cleaned PTEs, as reported by folio_mkclean().
 */
static inline int page_mkclean(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_mkclean(folio);
}
#endif /* _LINUX_RMAP_H */
12 changes: 6 additions & 6 deletions mm/rmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -981,7 +981,7 @@ static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
return true;
}

int page_mkclean(struct page *page)
int folio_mkclean(struct folio *folio)
{
int cleaned = 0;
struct address_space *mapping;
Expand All @@ -991,20 +991,20 @@ int page_mkclean(struct page *page)
.invalid_vma = invalid_mkclean_vma,
};

BUG_ON(!PageLocked(page));
BUG_ON(!folio_test_locked(folio));

if (!page_mapped(page))
if (!folio_mapped(folio))
return 0;

mapping = page_mapping(page);
mapping = folio_mapping(folio);
if (!mapping)
return 0;

rmap_walk(page, &rwc);
rmap_walk(&folio->page, &rwc);

return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
EXPORT_SYMBOL_GPL(folio_mkclean);

/**
* page_move_anon_rmap - move a page to our anon_vma
Expand Down

0 comments on commit d9c08e2

Please sign in to comment.