mm: mlock: update the interface to use folios

Update the mlock interface to accept folios rather than pages, bringing
the interface in line with the internal implementation.

munlock_vma_page() still requires a page_folio() conversion; however,
this is consistent with the existing mlock_vma_page() implementation
and is a product of rmap still dealing in pages rather than folios.

Link: https://lkml.kernel.org/r/cba12777c5544305014bc0cbec56bb4cc71477d8.1673526881.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Lorenzo Stoakes authored and Andrew Morton committed Feb 3, 2023
1 parent b213ef6 commit 96f97c4

Showing 6 changed files with 49 additions and 45 deletions.
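
Before the per-file diffs, the shape of the conversion is easiest to see in
isolation: each page-based entry point becomes a thin wrapper that converts
with page_folio() and hands off to a folio-based core, while callers that
already hold a folio skip the conversion entirely. The following is a
minimal, self-contained sketch of that pattern, not kernel code: struct page,
struct folio, and page_folio() here are simplified stand-ins for the real
kernel types and helpers.

        /* Simplified sketch of the wrapper pattern this commit applies.
         * All types below are reduced stand-ins, not the kernel's own.
         */
        #include <stdio.h>

        struct folio { int nr_pages; };         /* stand-in for struct folio */
        struct page  { struct folio *folio; };  /* stand-in for struct page */

        /* stand-in for the kernel's page_folio() conversion */
        static struct folio *page_folio(struct page *page)
        {
                return page->folio;
        }

        /* folio-based core, analogous to munlock_folio() below */
        static void munlock_folio(struct folio *folio)
        {
                printf("munlock folio spanning %d pages\n", folio->nr_pages);
        }

        /* page-based wrapper kept for callers (rmap) still dealing in pages */
        static void munlock_page(struct page *page)
        {
                munlock_folio(page_folio(page));
        }

        int main(void)
        {
                struct folio f = { .nr_pages = 512 };  /* e.g. a PMD-sized THP */
                struct page p = { .folio = &f };

                munlock_folio(&f);  /* folio-native callers call the core */
                munlock_page(&p);   /* legacy callers go through the wrapper */
                return 0;
        }

The diffs below apply this same shape to the mlock interface:
munlock_folio()/munlock_vma_folio() become the cores, and munlock_vma_page()
remains as the page-based wrapper for rmap.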
38 changes: 22 additions & 16 deletions mm/internal.h

@@ -533,10 +533,9 @@ extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
  * should be called with vma's mmap_lock held for read or write,
  * under page table lock for the pte/pmd being added or removed.
  *
- * mlock is usually called at the end of page_add_*_rmap(),
- * munlock at the end of page_remove_rmap(); but new anon
- * pages are managed by lru_cache_add_inactive_or_unevictable()
- * calling mlock_new_page().
+ * mlock is usually called at the end of page_add_*_rmap(), munlock at
+ * the end of page_remove_rmap(); but new anon folios are managed by
+ * folio_add_lru_vma() calling mlock_new_folio().
  *
  * @compound is used to include pmd mappings of THPs, but filter out
  * pte mappings of THPs, which cannot be consistently counted: a pte
@@ -565,18 +564,25 @@ static inline void mlock_vma_page(struct page *page,
         mlock_vma_folio(page_folio(page), vma, compound);
 }
 
-void munlock_page(struct page *page);
-static inline void munlock_vma_page(struct page *page,
+void munlock_folio(struct folio *folio);
+
+static inline void munlock_vma_folio(struct folio *folio,
                 struct vm_area_struct *vma, bool compound)
 {
         if (unlikely(vma->vm_flags & VM_LOCKED) &&
-            (compound || !PageTransCompound(page)))
-                munlock_page(page);
+            (compound || !folio_test_large(folio)))
+                munlock_folio(folio);
 }
 
-void mlock_new_page(struct page *page);
-bool need_mlock_page_drain(int cpu);
-void mlock_page_drain_local(void);
-void mlock_page_drain_remote(int cpu);
+static inline void munlock_vma_page(struct page *page,
+                struct vm_area_struct *vma, bool compound)
+{
+        munlock_vma_folio(page_folio(page), vma, compound);
+}
+
+void mlock_new_folio(struct folio *folio);
+bool need_mlock_drain(int cpu);
+void mlock_drain_local(void);
+void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
@@ -665,10 +671,10 @@ static inline void mlock_vma_page(struct page *page,
                 struct vm_area_struct *vma, bool compound) { }
 static inline void munlock_vma_page(struct page *page,
                 struct vm_area_struct *vma, bool compound) { }
-static inline void mlock_new_page(struct page *page) { }
-static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain_local(void) { }
-static inline void mlock_page_drain_remote(int cpu) { }
+static inline void mlock_new_folio(struct folio *folio) { }
+static inline bool need_mlock_drain(int cpu) { return false; }
+static inline void mlock_drain_local(void) { }
+static inline void mlock_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }

2 changes: 1 addition & 1 deletion mm/migrate.c

@@ -265,7 +265,7 @@ static bool remove_migration_pte(struct folio *folio,
                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                 }
                 if (vma->vm_flags & VM_LOCKED)
-                        mlock_page_drain_local();
+                        mlock_drain_local();
 
                 trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                            compound_order(new));

38 changes: 18 additions & 20 deletions mm/mlock.c

@@ -210,7 +210,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
         folio_batch_reinit(fbatch);
 }
 
-void mlock_page_drain_local(void)
+void mlock_drain_local(void)
 {
         struct folio_batch *fbatch;
 
@@ -221,7 +221,7 @@ void mlock_page_drain_local(void)
         local_unlock(&mlock_fbatch.lock);
 }
 
-void mlock_page_drain_remote(int cpu)
+void mlock_drain_remote(int cpu)
 {
         struct folio_batch *fbatch;
 
@@ -231,7 +231,7 @@ void mlock_page_drain_remote(int cpu)
         mlock_folio_batch(fbatch);
 }
 
-bool need_mlock_page_drain(int cpu)
+bool need_mlock_drain(int cpu)
 {
         return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
 }
@@ -262,13 +262,12 @@ void mlock_folio(struct folio *folio)
 }
 
 /**
- * mlock_new_page - mlock a newly allocated page not yet on LRU
- * @page: page to be mlocked, either a normal page or a THP head.
+ * mlock_new_folio - mlock a newly allocated folio not yet on LRU
+ * @folio: folio to be mlocked, either normal or a THP head.
  */
-void mlock_new_page(struct page *page)
+void mlock_new_folio(struct folio *folio)
 {
         struct folio_batch *fbatch;
-        struct folio *folio = page_folio(page);
         int nr_pages = folio_nr_pages(folio);
 
         local_lock(&mlock_fbatch.lock);
@@ -286,13 +285,12 @@ void mlock_new_page(struct page *page)
 }
 
 /**
- * munlock_page - munlock a page
- * @page: page to be munlocked, either a normal page or a THP head.
+ * munlock_folio - munlock a folio
+ * @folio: folio to be munlocked, either normal or a THP head.
  */
-void munlock_page(struct page *page)
+void munlock_folio(struct folio *folio)
 {
         struct folio_batch *fbatch;
-        struct folio *folio = page_folio(page);
 
         local_lock(&mlock_fbatch.lock);
         fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
@@ -314,35 +312,35 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
         struct vm_area_struct *vma = walk->vma;
         spinlock_t *ptl;
         pte_t *start_pte, *pte;
-        struct page *page;
+        struct folio *folio;
 
         ptl = pmd_trans_huge_lock(pmd, vma);
         if (ptl) {
                 if (!pmd_present(*pmd))
                         goto out;
                 if (is_huge_zero_pmd(*pmd))
                         goto out;
-                page = pmd_page(*pmd);
+                folio = page_folio(pmd_page(*pmd));
                 if (vma->vm_flags & VM_LOCKED)
-                        mlock_folio(page_folio(page));
+                        mlock_folio(folio);
                 else
-                        munlock_page(page);
+                        munlock_folio(folio);
                 goto out;
         }
 
         start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
         for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
                 if (!pte_present(*pte))
                         continue;
-                page = vm_normal_page(vma, addr, *pte);
-                if (!page || is_zone_device_page(page))
+                folio = vm_normal_folio(vma, addr, *pte);
+                if (!folio || folio_is_zone_device(folio))
                         continue;
-                if (PageTransCompound(page))
+                if (folio_test_large(folio))
                         continue;
                 if (vma->vm_flags & VM_LOCKED)
-                        mlock_folio(page_folio(page));
+                        mlock_folio(folio);
                 else
-                        munlock_page(page);
+                        munlock_folio(folio);
         }
         pte_unmap(start_pte);
 out:

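The renamed drain helpers above operate on a per-CPU batch of folios, which
is why the "page" in their old names was misleading. As a rough mental model
only, here is a toy sketch of the local/remote/need-drain trio: a plain
per-CPU counter array stands in for the kernel's local_lock-protected
struct folio_batch, and every name suffixed _toy is a hypothetical stand-in.

        #include <stdbool.h>
        #include <stdio.h>

        #define NR_CPUS    4    /* toy CPU count */
        #define BATCH_MAX 15    /* a folio_batch also holds a handful of entries */

        static int batch_count[NR_CPUS];   /* folios queued per CPU */

        static void mlock_folio_batch_flush(int cpu)
        {
                printf("cpu %d: moving %d folios to the LRU\n",
                       cpu, batch_count[cpu]);
                batch_count[cpu] = 0;
        }

        /* drain the batch of the CPU we are running on */
        static void mlock_drain_local_toy(int this_cpu)
        {
                if (batch_count[this_cpu])
                        mlock_folio_batch_flush(this_cpu);
        }

        /* drain another CPU's batch, e.g. when that CPU goes offline */
        static void mlock_drain_remote_toy(int cpu)
        {
                if (batch_count[cpu])
                        mlock_folio_batch_flush(cpu);
        }

        /* cheap check used to decide whether a drain is worth scheduling */
        static bool need_mlock_drain_toy(int cpu)
        {
                return batch_count[cpu] != 0;
        }

        int main(void)
        {
                batch_count[0] = 3;
                batch_count[2] = BATCH_MAX;

                for (int cpu = 0; cpu < NR_CPUS; cpu++)
                        if (need_mlock_drain_toy(cpu))
                                mlock_drain_remote_toy(cpu);

                mlock_drain_local_toy(0);  /* nothing left to do */
                return 0;
        }

The mm/page_alloc.c and mm/swap.c hunks below are exactly these call sites:
a CPU-hotplug path draining a remote CPU, and the LRU drain paths draining
the local one.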
2 changes: 1 addition & 1 deletion mm/page_alloc.c

@@ -8587,7 +8587,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
         struct zone *zone;
 
         lru_add_drain_cpu(cpu);
-        mlock_page_drain_remote(cpu);
+        mlock_drain_remote(cpu);
         drain_pages(cpu);
 
         /*

4 changes: 2 additions & 2 deletions mm/rmap.c

@@ -1764,7 +1764,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                  */
                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
-                        mlock_page_drain_local();
+                        mlock_drain_local();
                 folio_put(folio);
         }
 
@@ -2105,7 +2105,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                  */
                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
-                        mlock_page_drain_local();
+                        mlock_drain_local();
                 folio_put(folio);
         }
 

10 changes: 5 additions & 5 deletions mm/swap.c

@@ -562,7 +562,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
         if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
-                mlock_new_page(&folio->page);
+                mlock_new_folio(folio);
         else
                 folio_add_lru(folio);
 }
@@ -781,7 +781,7 @@ void lru_add_drain(void)
         local_lock(&cpu_fbatches.lock);
         lru_add_drain_cpu(smp_processor_id());
         local_unlock(&cpu_fbatches.lock);
-        mlock_page_drain_local();
+        mlock_drain_local();
 }
 
 /*
@@ -796,7 +796,7 @@ static void lru_add_and_bh_lrus_drain(void)
         lru_add_drain_cpu(smp_processor_id());
         local_unlock(&cpu_fbatches.lock);
         invalidate_bh_lrus_cpu();
-        mlock_page_drain_local();
+        mlock_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -805,7 +805,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
         lru_add_drain_cpu(smp_processor_id());
         drain_local_pages(zone);
         local_unlock(&cpu_fbatches.lock);
-        mlock_page_drain_local();
+        mlock_drain_local();
 }
 
 #ifdef CONFIG_SMP
@@ -828,7 +828,7 @@ static bool cpu_needs_drain(unsigned int cpu)
                 folio_batch_count(&fbatches->lru_deactivate) ||
                 folio_batch_count(&fbatches->lru_lazyfree) ||
                 folio_batch_count(&fbatches->activate) ||
-                need_mlock_page_drain(cpu) ||
+                need_mlock_drain(cpu) ||
                 has_bh_in_lru(cpu, NULL);
 }
 
