Skip to content

Commit

Permalink
arm64: tlb: Set the TTL field in flush_tlb_range
Browse files Browse the repository at this point in the history
This patch uses the cleared_* in struct mmu_gather to set the
TTL field in flush_tlb_range().

Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20200625080314.230-6-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
  • Loading branch information
Zhenyu Ye authored and Catalin Marinas committed Jul 7, 2020
1 parent 2631ed0 commit c4ab2cb
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 7 deletions.
29 changes: 28 additions & 1 deletion arch/arm64/include/asm/tlb.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,37 @@ static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

/*
 * Return the ARM64 TLBI translation-table level hint for this gather:
 * 3 for PTE-only, 2 for PMD-only, 1 for PUD-only invalidations.
 * Falls back to 0 (no hint) when several levels were cleared or none
 * were. p4d is never reported on its own since arm64 has no p4d level.
 */
static inline int tlb_get_level(struct mmu_gather *tlb)
{
	unsigned int pte = tlb->cleared_ptes;
	unsigned int pmd = tlb->cleared_pmds;
	unsigned int pud = tlb->cleared_puds;
	unsigned int p4d = tlb->cleared_p4ds;

	if (pte && !pmd && !pud && !p4d)
		return 3;

	if (pmd && !pte && !pud && !p4d)
		return 2;

	if (pud && !pte && !pmd && !p4d)
		return 1;

	/* Ambiguous or empty: let the hardware walk all levels. */
	return 0;
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
bool last_level = !tlb->freed_tables;
unsigned long stride = tlb_get_unmap_size(tlb);
int tlb_level = tlb_get_level(tlb);

/*
* If we're tearing down the address space then we only care about
Expand All @@ -38,7 +64,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
return;
}

__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
last_level, tlb_level);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
Expand Down
14 changes: 8 additions & 6 deletions arch/arm64/include/asm/tlbflush.h
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,

static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long stride, bool last_level)
unsigned long stride, bool last_level,
int tlb_level)
{
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
Expand All @@ -252,11 +253,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
for (addr = start; addr < end; addr += stride) {
if (last_level) {
__tlbi_level(vale1is, addr, 0);
__tlbi_user_level(vale1is, addr, 0);
__tlbi_level(vale1is, addr, tlb_level);
__tlbi_user_level(vale1is, addr, tlb_level);
} else {
__tlbi_level(vae1is, addr, 0);
__tlbi_user_level(vae1is, addr, 0);
__tlbi_level(vae1is, addr, tlb_level);
__tlbi_user_level(vae1is, addr, tlb_level);
}
}
dsb(ish);
Expand All @@ -268,8 +269,9 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
/*
* We cannot use leaf-only invalidation here, since we may be invalidating
* table entries as part of collapsing hugepages or moving page tables.
* Set the tlb_level to 0 because we cannot get enough information here.
*/
__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
Expand Down

0 comments on commit c4ab2cb

Please sign in to comment.