From f7092393570f24865199d1642eb097f9e1c8f01e Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 17 Nov 2023 16:14:44 +0000
Subject: [PATCH] memory-failure: convert delete_from_lru_cache() to take a folio

All three callers now have a folio; pass it in instead of the page.
Saves five calls to compound_head().

Link: https://lkml.kernel.org/r/20231117161447.2461643-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: Naoya Horiguchi
Signed-off-by: Andrew Morton
---
 mm/memory-failure.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d2764fd3e4484..e73f2047ffcbe 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -902,26 +902,26 @@ static const char * const action_page_types[] = {
  * The page count will stop it from being freed by unpoison.
  * Stress tests should be aware of this memory leak problem.
  */
-static int delete_from_lru_cache(struct page *p)
+static int delete_from_lru_cache(struct folio *folio)
 {
-	if (isolate_lru_page(p)) {
+	if (folio_isolate_lru(folio)) {
 		/*
 		 * Clear sensible page flags, so that the buddy system won't
-		 * complain when the page is unpoison-and-freed.
+		 * complain when the folio is unpoison-and-freed.
 		 */
-		ClearPageActive(p);
-		ClearPageUnevictable(p);
+		folio_clear_active(folio);
+		folio_clear_unevictable(folio);
 
 		/*
 		 * Poisoned page might never drop its ref count to 0 so we have
 		 * to uncharge it manually from its memcg.
 		 */
-		mem_cgroup_uncharge(page_folio(p));
+		mem_cgroup_uncharge(folio);
 
 		/*
-		 * drop the page count elevated by isolate_lru_page()
+		 * drop the refcount elevated by folio_isolate_lru()
		 */
-		put_page(p);
+		folio_put(folio);
 		return 0;
 	}
 	return -EIO;
@@ -1019,7 +1019,7 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
 	struct address_space *mapping;
 	bool extra_pins;
 
-	delete_from_lru_cache(p);
+	delete_from_lru_cache(folio);
 
 	/*
 	 * For anonymous folios the only reference left
@@ -1146,7 +1146,7 @@ static int me_swapcache_dirty(struct page_state *ps, struct page *p)
 	/* Trigger EIO in shmem: */
 	folio_clear_uptodate(folio);
 
-	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
+	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
 	folio_unlock(folio);
 
 	if (ret == MF_DELAYED)
@@ -1165,7 +1165,7 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
 
 	delete_from_swap_cache(folio);
 
-	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
 	folio_unlock(folio);
 
 	if (has_extra_refcount(ps, p, false))
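
A note on the "saves five calls to compound_head()" claim in the changelog:
each page-based helper (ClearPageActive(), put_page(), isolate_lru_page(),
and so on) must first resolve the head page of a possibly-compound page,
whereas the folio variants start from the head by construction. The
userspace sketch below models that difference; the types and helpers are
simplified stand-ins that only mimic the kernel names, not the real
definitions:

#include <stdio.h>

/* Simplified stand-ins: in this model a folio is just a head page. */
struct page { struct page *head; unsigned long flags; };
struct folio { struct page page; };

static int nr_compound_head_calls;

/* Stand-in for compound_head(): look up the head page of a compound page. */
static struct page *compound_head(struct page *p)
{
	nr_compound_head_calls++;
	return p->head;
}

/* Page-based helper: must resolve the head page on every call. */
static void ClearPageActive(struct page *p)
{
	compound_head(p)->flags &= ~1UL;
}

/* Folio-based helper: the caller already holds the head page. */
static void folio_clear_active(struct folio *folio)
{
	folio->page.flags &= ~1UL;
}

int main(void)
{
	struct page head = { .head = &head, .flags = 1UL };
	struct folio *folio = (struct folio *)&head;	/* folio aliases its head page */

	ClearPageActive(&head);		/* one hidden compound_head() call */
	folio_clear_active(folio);	/* none: the lookup happened earlier */

	printf("compound_head() calls: %d\n", nr_compound_head_calls);
	return 0;
}

In the patch itself the same reasoning applies five times over:
delete_from_lru_cache() now receives the folio its callers already computed
via page_folio(), so folio_isolate_lru(), folio_clear_active(),
folio_clear_unevictable(), mem_cgroup_uncharge() and folio_put() all skip
the repeated head-page lookup.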