mm: remove vmacache
With the maple tree and its maple state now in use, the vmacache is no
longer beneficial and only complicates the VMA code.  Remove the vmacache
to avoid the work of keeping it up to date and to reduce code complexity.

Link: https://lkml.kernel.org/r/20220906194824.2110408-26-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Liam R. Howlett authored and Andrew Morton committed Sep 27, 2022
1 parent 4dd1b84 commit 7964cf8
Showing 17 changed files with 9 additions and 267 deletions.
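Before the file-by-file diff, a short illustration of the core simplification may help. find_vma() used to probe the per-thread vmacache and fall back to a tree walk, refilling the cache on the way out; with the maple tree it becomes a single mt_find() call (the real change is in the mm/mmap.c hunks further down). The sketch below is illustrative only — the function names are invented here so the before and after can stand side by side — but the calls mirror the lines touched by this patch.

/* Illustrative before/after of the lookup path; names are invented for this sketch. */
struct vm_area_struct *find_vma_with_vmacache(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        unsigned long index = addr;

        mmap_assert_locked(mm);
        /* Check the per-thread cache first. */
        vma = vmacache_find(mm, addr);
        if (likely(vma))
                return vma;

        /* Miss: walk the maple tree, then remember the result. */
        vma = mt_find(&mm->mm_mt, &index, ULONG_MAX);
        if (vma)
                vmacache_update(addr, vma);
        return vma;
}

struct vm_area_struct *find_vma_maple_only(struct mm_struct *mm, unsigned long addr)
{
        unsigned long index = addr;

        /* The maple tree walk is cheap enough that no extra cache is kept. */
        mmap_assert_locked(mm);
        return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}

Every removal in this commit is a variant of the same pattern: drop the cache probe, the cache refill, or the cache invalidation, and rely on the tree alone.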
3 changes: 0 additions & 3 deletions fs/exec.c
@@ -28,7 +28,6 @@
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
@@ -1027,8 +1026,6 @@ static int exec_mmap(struct mm_struct *mm)
activate_mm(active_mm, mm);
if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
tsk->mm->vmacache_seqnum = 0;
vmacache_flush(tsk);
task_unlock(tsk);
lru_gen_use_mm(mm);

1 change: 0 additions & 1 deletion fs/proc/task_mmu.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/vmacache.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
1 change: 0 additions & 1 deletion include/linux/mm_types.h
@@ -475,7 +475,6 @@ struct mm_struct {
struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct maple_tree mm_mt;
u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
12 changes: 0 additions & 12 deletions include/linux/mm_types_task.h
@@ -24,18 +24,6 @@
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)

/*
* The per task VMA cache array:
*/
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

struct vmacache {
u64 seqnum;
struct vm_area_struct *vmas[VMACACHE_SIZE];
};

/*
* When updating this, please also update struct resident_page_types[] in
* kernel/fork.c
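The struct vmacache removed in the hunk above is the entire per-task cache: a sequence number plus VMACACHE_SIZE (four) VMA pointers. For orientation, the lookup side lived in the removed mm/vmacache.c (not rendered in this view); it indexed those slots by a hash of the address and treated a stale sequence number as a miss. The fragment below is an approximation written for illustration, not the removed code:

/* Approximation of the removed per-task lookup; illustrative, not the original source. */
#define VMACACHE_HASH(addr) (((addr) >> PAGE_SHIFT) & VMACACHE_MASK)

static struct vm_area_struct *vmacache_find_sketch(struct mm_struct *mm,
                                                   unsigned long addr)
{
        int idx = VMACACHE_HASH(addr);
        int i;

        /* Only the owning task may trust its own cache, and only while the
         * mm-wide sequence number still matches (see vmacache_invalidate()).
         */
        if (mm != current->mm || current->vmacache.seqnum != mm->vmacache_seqnum)
                return NULL;

        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache.vmas[idx];

                if (vma && vma->vm_start <= addr && vma->vm_end > addr)
                        return vma;                     /* hit */
                if (++idx == VMACACHE_SIZE)
                        idx = 0;
        }
        return NULL;                                    /* miss: caller walks the tree */
}

Keeping those four slots and the sequence number coherent across fork, exec, munmap and VMA merging is exactly the bookkeeping the rest of this diff deletes.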
1 change: 0 additions & 1 deletion include/linux/sched.h
@@ -861,7 +861,6 @@ struct task_struct {
struct mm_struct *active_mm;

/* Per-thread vma caching: */
struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
struct task_rss_stat rss_stat;
4 changes: 0 additions & 4 deletions include/linux/vm_event_item.h
@@ -129,10 +129,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
#endif /* CONFIG_DEBUG_TLBFLUSH */
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
#endif
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
28 changes: 0 additions & 28 deletions include/linux/vmacache.h

This file was deleted.
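The contents of the deleted header are not rendered here. Reconstructed from the includes and call sites removed elsewhere in this diff (fs/exec.c, kernel/fork.c, kernel/debug/debug_core.c, mm/mmap.c and mm/nommu.c), its interface looked roughly like the following; treat it as an approximation rather than a verbatim copy of the file:

/* Approximate shape of the removed <linux/vmacache.h>, inferred from its callers. */
#ifndef __LINUX_VMACACHE_H
#define __LINUX_VMACACHE_H

#include <linux/sched.h>
#include <linux/mm.h>

/* Drop all of this task's cached entries (called on exec and fork). */
static inline void vmacache_flush(struct task_struct *tsk)
{
        memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
}

/* Invalidate every thread's cache for this mm by bumping the sequence number;
 * lookups whose cached seqnum no longer matches simply miss.
 */
static inline void vmacache_invalidate(struct mm_struct *mm)
{
        mm->vmacache_seqnum++;
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr);

#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end);
#endif

#endif /* __LINUX_VMACACHE_H */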

6 changes: 0 additions & 6 deletions include/linux/vmstat.h
@@ -125,12 +125,6 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

12 changes: 0 additions & 12 deletions kernel/debug/debug_core.c
@@ -50,7 +50,6 @@
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>
#include <linux/irq.h>
#include <linux/security.h>
@@ -283,17 +282,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
if (!CACHE_FLUSH_IS_SAFE)
return;

if (current->mm) {
int i;

for (i = 0; i < VMACACHE_SIZE; i++) {
if (!current->vmacache.vmas[i])
continue;
flush_cache_range(current->vmacache.vmas[i],
addr, addr + BREAK_INSTR_SIZE);
}
}

/* Force flush instruction cache if it was outside the mm */
flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}
5 changes: 0 additions & 5 deletions kernel/fork.c
@@ -43,7 +43,6 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
@@ -1128,7 +1127,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->mmap = NULL;
mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
seqcount_init(&mm->write_protect_seq);
@@ -1585,9 +1583,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
if (!oldmm)
return 0;

/* initialize the new vmacache entries */
vmacache_flush(tsk);

if (clone_flags & CLONE_VM) {
mmget(oldmm);
mm = oldmm;
8 changes: 0 additions & 8 deletions lib/Kconfig.debug
@@ -812,14 +812,6 @@ config DEBUG_VM

If unsure, say N.

config DEBUG_VM_VMACACHE
bool "Debug VMA caching"
depends on DEBUG_VM
help
Enable this to turn on VMA caching debug information. Doing so
can cause significant overhead, so only enable it in non-production
environments.

config DEBUG_VM_MAPLE_TREE
bool "Debug VM maple trees"
depends on DEBUG_VM
2 changes: 1 addition & 1 deletion mm/Makefile
@@ -52,7 +52,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
mm_init.o percpu.o slab_common.o \
compaction.o vmacache.o \
compaction.o \
interval_tree.o list_lru.o workingset.o \
debug.o gup.o mmap_lock.o $(mmu-y)

4 changes: 2 additions & 2 deletions mm/debug.c
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
pr_emerg("mm %px mmap %px task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %px\n"
#endif
@@ -183,7 +183,7 @@ void dump_mm(const struct mm_struct *mm)
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",

mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
mm, mm->mmap, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
31 changes: 2 additions & 29 deletions mm/mmap.c
@@ -14,7 +14,6 @@
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
@@ -680,9 +679,6 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
/* Remove from mm linked list - also updates highest_vm_end */
__vma_unlink_list(mm, next);

/* Kill the cache */
vmacache_invalidate(mm);

if (file)
__remove_shared_vm_struct(next, file, mapping);

@@ -923,8 +919,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
__vma_unlink_list(mm, next);
if (remove_next == 2)
__vma_unlink_list(mm, next_next);
/* Kill the cache */
vmacache_invalidate(mm);

if (file) {
__remove_shared_vm_struct(next, file, mapping);
@@ -2233,19 +2227,10 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
unsigned long end_addr)
{
struct vm_area_struct *vma;
unsigned long index = start_addr;

mmap_assert_locked(mm);
/* Check the cache first. */
vma = vmacache_find(mm, start_addr);
if (likely(vma))
return vma;

vma = mt_find(&mm->mm_mt, &index, end_addr - 1);
if (vma)
vmacache_update(start_addr, vma);
return vma;
return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

@@ -2259,19 +2244,10 @@ EXPORT_SYMBOL(find_vma_intersection);
*/
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
unsigned long index = addr;

mmap_assert_locked(mm);
/* Check the cache first. */
vma = vmacache_find(mm, addr);
if (likely(vma))
return vma;

vma = mt_find(&mm->mm_mt, &index, ULONG_MAX);
if (vma)
vmacache_update(addr, vma);
return vma;
return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
EXPORT_SYMBOL(find_vma);

@@ -2660,9 +2636,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas,
mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;

/* Kill the cache */
vmacache_invalidate(mm);

/*
* Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
* VM_GROWSUP VMA. Such VMAs can change their size under
37 changes: 4 additions & 33 deletions mm/nommu.c
@@ -19,7 +19,6 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
@@ -598,23 +597,12 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
*/
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
int i;
struct address_space *mapping;
struct mm_struct *mm = vma->vm_mm;
struct task_struct *curr = current;
MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);

mm->map_count--;
for (i = 0; i < VMACACHE_SIZE; i++) {
/* if the vma is cached, invalidate the entire cache */
if (curr->vmacache.vmas[i] == vma) {
vmacache_invalidate(mm);
break;
}
}

vma->vm_mm->map_count--;
/* remove the VMA from the mapping */
if (vma->vm_file) {
struct address_space *mapping;
mapping = vma->vm_file->f_mapping;

i_mmap_lock_write(mapping);
@@ -626,7 +614,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)

/* remove from the MM's tree and list */
vma_mas_remove(vma, &mas);
__vma_unlink_list(mm, vma);
__vma_unlink_list(vma->vm_mm, vma);
}

/*
@@ -659,20 +647,9 @@ EXPORT_SYMBOL(find_vma_intersection);
*/
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
MA_STATE(mas, &mm->mm_mt, addr, addr);

/* check the cache first */
vma = vmacache_find(mm, addr);
if (likely(vma))
return vma;

vma = mas_walk(&mas);

if (vma)
vmacache_update(addr, vma);

return vma;
return mas_walk(&mas);
}
EXPORT_SYMBOL(find_vma);

@@ -706,11 +683,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
unsigned long end = addr + len;
MA_STATE(mas, &mm->mm_mt, addr, addr);

/* check the cache first */
vma = vmacache_find_exact(mm, addr, end);
if (vma)
return vma;

vma = mas_walk(&mas);
if (!vma)
return NULL;
@@ -719,7 +691,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
if (vma->vm_end != end)
return NULL;

vmacache_update(addr, vma);
return vma;
}

(Diffs for the remaining changed files, including the deleted mm/vmacache.c, are not rendered in this view.)
