Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "The main one is a horrible macro fix for our TLB flushing code which
  resulted in over-invalidation on the MMU notifier path.

  Summary:

   - Fix population of the vmemmap for regions of memory that are
     smaller than a section (128 MiB)

   - Fix range-based TLB over-invalidation when invoked via an MMU
     notifier"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  Fix mmu notifiers for range-based invalidates
  arm64: mm: Populate vmemmap at the page level if not section aligned
Linus Torvalds committed Mar 14, 2025
2 parents 2eaca8a + f7edb07 commit ef92486
Showing 2 changed files with 16 additions and 11 deletions.
22 changes: 12 additions & 10 deletions arch/arm64/include/asm/tlbflush.h
@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride,                 \
                                asid, tlb_level, tlbi_user, lpa2)       \
 do {                                                                   \
+       typeof(start) __flush_start = start;                            \
+       typeof(pages) __flush_pages = pages;                            \
        int num = 0;                                                    \
        int scale = 3;                                                  \
        int shift = lpa2 ? 16 : PAGE_SHIFT;                             \
        unsigned long addr;                                             \
                                                                        \
-       while (pages > 0) {                                             \
+       while (__flush_pages > 0) {                                     \
                if (!system_supports_tlb_range() ||                     \
-                   pages == 1 ||                                       \
-                   (lpa2 && start != ALIGN(start, SZ_64K))) {          \
-                       addr = __TLBI_VADDR(start, asid);               \
+                   __flush_pages == 1 ||                               \
+                   (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
+                       addr = __TLBI_VADDR(__flush_start, asid);       \
                        __tlbi_level(op, addr, tlb_level);              \
                        if (tlbi_user)                                  \
                                __tlbi_user_level(op, addr, tlb_level); \
-                       start += stride;                                \
-                       pages -= stride >> PAGE_SHIFT;                  \
+                       __flush_start += stride;                        \
+                       __flush_pages -= stride >> PAGE_SHIFT;          \
                        continue;                                       \
                }                                                       \
                                                                        \
-               num = __TLBI_RANGE_NUM(pages, scale);                   \
+               num = __TLBI_RANGE_NUM(__flush_pages, scale);           \
                if (num >= 0) {                                         \
-                       addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
+                       addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
                                                scale, num, tlb_level); \
                        __tlbi(r##op, addr);                            \
                        if (tlbi_user)                                  \
                                __tlbi_user(r##op, addr);               \
-                       start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
-                       pages -= __TLBI_RANGE_PAGES(num, scale);        \
+                       __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+                       __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
                }                                                       \
                scale--;                                                \
        }                                                               \
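For readers skimming the diff above: `__flush_tlb_range_op()` is a macro, so advancing `start` and `pages` inside its loop also advanced the caller's variables. Any code that reuses the original range after the macro expansion (as the MMU notifier invalidation does, per the pull message) then saw a clobbered range, leading to the over-invalidation described above. The fix snapshots both arguments into the macro-local `__flush_start`/`__flush_pages`. Below is a minimal standalone sketch of the same pattern, compilable with GCC or Clang (it uses the GNU C `typeof` extension, as the patch does); the macro names CONSUME_BAD and CONSUME_GOOD are hypothetical:

#include <stdio.h>

/* Clobbers the caller's variables: after expansion, start/pages are spent. */
#define CONSUME_BAD(start, pages)               \
do {                                            \
        while (pages > 0) {                     \
                start += 4096;                  \
                pages -= 1;                     \
        }                                       \
} while (0)

/* Same loop over local copies: the caller's values survive, mirroring
 * the __flush_start/__flush_pages change in the patch above. */
#define CONSUME_GOOD(start, pages)              \
do {                                            \
        typeof(start) __start = (start);        \
        typeof(pages) __pages = (pages);        \
        while (__pages > 0) {                   \
                __start += 4096;                \
                __pages -= 1;                   \
        }                                       \
} while (0)

int main(void)
{
        unsigned long start = 0x1000, pages = 4;

        CONSUME_BAD(start, pages);
        printf("bad:  start=%#lx pages=%lu\n", start, pages);  /* start=0x5000 pages=0 */

        start = 0x1000;
        pages = 4;
        CONSUME_GOOD(start, pages);
        printf("good: start=%#lx pages=%lu\n", start, pages);  /* start=0x1000 pages=4 */
        return 0;
}

With the copies in place, the caller's original range is still intact after the flush loop, so the secondary-TLB notifier can be handed the range that was actually requested.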
5 changes: 4 additions & 1 deletion arch/arm64/mm/mmu.c
@@ -1177,8 +1177,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
 {
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+       /* [start, end] should be within one section */
+       WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page));
 
-       if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+       if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) ||
+           (end - start < PAGES_PER_SECTION * sizeof(struct page)))
                return vmemmap_populate_basepages(start, end, node, altmap);
        else
                return vmemmap_populate_hugepages(start, end, node, altmap);
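A back-of-the-envelope check on why the section size is the pivot here (a sketch assuming the common arm64 configuration of 4K pages and a 64-byte struct page; both values are configuration-dependent): a 128 MiB section holds 32768 base pages, whose struct page array occupies exactly 2 MiB of vmemmap, i.e. one PMD-level hugepage. A hot-plugged region smaller than a section needs less vmemmap than that, so it cannot be mapped with a whole hugepage and must take the vmemmap_populate_basepages() path added above:

#include <stdio.h>

int main(void)
{
        /* Assumed values: SECTION_SIZE = 128 MiB, PAGE_SIZE = 4K,
         * sizeof(struct page) = 64 bytes. */
        unsigned long section_size = 128UL << 20;
        unsigned long page_size = 4096;
        unsigned long struct_page_size = 64;

        unsigned long pages_per_section = section_size / page_size;    /* 32768 */
        unsigned long vmemmap_bytes = pages_per_section * struct_page_size;

        /* Prints 2048 KiB == 2 MiB, the PMD hugepage size with 4K pages,
         * so only a full section's vmemmap fills a whole hugepage. */
        printf("vmemmap per section: %lu KiB\n", vmemmap_bytes >> 10);
        return 0;
}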
