From c962a6e7463980a513e990a7a8a9967e529ad467 Mon Sep 17 00:00:00 2001
From: Alexandre Ghiti <alexghiti@rivosinc.com>
Date: Mon, 30 Oct 2023 14:30:26 +0100
Subject: [PATCH] riscv: Improve flush_tlb_range() for hugetlb pages

flush_tlb_range() uses a fixed stride of PAGE_SIZE, so in its current
form, when a hugetlb mapping needs to be flushed, it flushes the whole
TLB. Instead, set the stride to the size of the hugetlb mapping so that
only the hugetlb mapping is flushed. However, if the hugepage is a
NAPOT region, every PTE that constitutes this mapping must be
invalidated, so the stride size must actually be the size covered by a
single PTE.

Note that THPs are directly handled by flush_pmd_tlb_range().

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # On RZ/Five SMARC
Link: https://lore.kernel.org/r/20231030133027.19542-3-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
---
 arch/riscv/mm/tlbflush.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index fa03289853d8a..b6d712a823060 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -3,6 +3,7 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 
@@ -147,7 +148,33 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+	unsigned long stride_size;
+
+	if (!is_vm_hugetlb_page(vma)) {
+		stride_size = PAGE_SIZE;
+	} else {
+		stride_size = huge_page_size(hstate_vma(vma));
+
+		/*
+		 * As stated in the privileged specification, every PTE in a
+		 * NAPOT region must be invalidated, so reset the stride in that
+		 * case.
+		 */
+		if (has_svnapot()) {
+			if (stride_size >= PGDIR_SIZE)
+				stride_size = PGDIR_SIZE;
+			else if (stride_size >= P4D_SIZE)
+				stride_size = P4D_SIZE;
+			else if (stride_size >= PUD_SIZE)
+				stride_size = PUD_SIZE;
+			else if (stride_size >= PMD_SIZE)
+				stride_size = PMD_SIZE;
+			else
+				stride_size = PAGE_SIZE;
+		}
+	}
+
+	__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
 }
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
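
As a rough illustration of the stride clamping in the hunk above, here
is a minimal userspace sketch, not kernel code: the *_SIZE constants
are illustrative values for an sv57 layout with 4 KiB base pages rather
than the kernel's configuration-dependent definitions, and
napot_supported stands in for the kernel's has_svnapot() runtime check.

/* stride_demo.c - standalone sketch of the stride selection above. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE  (1UL << 12)  /* 4 KiB base page */
#define PMD_SIZE   (1UL << 21)  /* 2 MiB, one PMD entry */
#define PUD_SIZE   (1UL << 30)  /* 1 GiB, one PUD entry */
#define P4D_SIZE   (1UL << 39)  /* 512 GiB, one P4D entry */
#define PGDIR_SIZE (1UL << 48)  /* 256 TiB, one PGD entry */

/*
 * Clamp a hugepage size down to the size covered by a single page-table
 * entry. A NAPOT hugepage (e.g. 64 KiB) is built from several base-page
 * PTEs, and every one of them must be hit by the flush, so the stride
 * falls back to the entry size of the page-table level below it.
 */
static unsigned long flush_stride(unsigned long hugepage_size,
				  bool napot_supported)
{
	unsigned long stride_size = hugepage_size;

	if (!napot_supported)
		return stride_size;

	if (stride_size >= PGDIR_SIZE)
		stride_size = PGDIR_SIZE;
	else if (stride_size >= P4D_SIZE)
		stride_size = P4D_SIZE;
	else if (stride_size >= PUD_SIZE)
		stride_size = PUD_SIZE;
	else if (stride_size >= PMD_SIZE)
		stride_size = PMD_SIZE;
	else
		stride_size = PAGE_SIZE;

	return stride_size;
}

int main(void)
{
	/*
	 * A 64 KiB NAPOT hugepage sits between PAGE_SIZE and PMD_SIZE,
	 * so the stride drops to PAGE_SIZE and every constituent PTE is
	 * flushed; a 2 MiB hugepage keeps a 2 MiB stride because it is
	 * mapped by a single PMD entry.
	 */
	printf("64K hugepage -> stride %lu\n",
	       flush_stride(64UL << 10, true));	/* prints 4096 */
	printf("2M hugepage  -> stride %lu\n",
	       flush_stride(2UL << 20, true));	/* prints 2097152 */
	return 0;
}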