From cc5734481b3c24ddee1551f9732d743453bca010 Mon Sep 17 00:00:00 2001
From: "Ritesh Harjani (IBM)"
Date: Fri, 18 Oct 2024 22:59:45 +0530
Subject: [PATCH] book3s64/hash: Add hash_debug_pagealloc_add_slot() function

This adds a hash_debug_pagealloc_add_slot() function instead of open
coding it in htab_bolt_mapping(). This is required since the kfence
functionality will later be separated so that it does not depend upon
debug_pagealloc.

No functional change in this patch.

Signed-off-by: Ritesh Harjani (IBM)
Signed-off-by: Michael Ellerman
Link: https://patch.msgid.link/026f0aaa1dddd89154dc8d20ceccfca4f63ccf79.1729271995.git.ritesh.list@gmail.com
---
 arch/powerpc/mm/book3s64/hash_utils.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 82151fff96481..6e38602243512 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -328,6 +328,14 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 				     mmu_kernel_ssize, 0);
 }
 
+static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot)
+{
+	if (!debug_pagealloc_enabled())
+		return;
+	if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+		linear_map_hash_slots[paddr >> PAGE_SHIFT] = slot | 0x80;
+}
+
 int hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
@@ -353,6 +361,7 @@ int hash__kernel_map_pages(struct page *page, int numpages,
 {
 	return 0;
 }
+static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot) {}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /*
@@ -513,9 +522,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled() &&
-		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
-			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
+		hash_debug_pagealloc_add_slot(paddr, ret);
 	}
 	return ret < 0 ? ret : 0;
 }
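
[Editor's sketch, not part of the patch: a minimal userspace illustration of
the slot encoding the new helper writes. Setting 0x80 marks a
linear_map_hash_slots[] entry as valid while the low 7 bits hold the HPTE
slot number, which the unmap path can later read back and clear. PAGE_SHIFT,
the 8-entry array, and main() are illustrative stand-ins chosen here, and the
debug_pagealloc_enabled() early return is omitted because it is a kernel-only
helper.]

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	16	/* illustrative; 64K pages */

/* Stand-ins for the per-linear-map-page slot array in hash_utils.c. */
static uint8_t linear_map_hash_slots[8];
static unsigned long linear_map_hash_count = 8;

/* Mirrors the helper added by this patch: record the hash slot for the
 * linear map page backing paddr. 0x80 flags the entry as in-use; the
 * low 7 bits carry the slot number. */
static inline void hash_debug_pagealloc_add_slot(uint64_t paddr, int slot)
{
	if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
		linear_map_hash_slots[paddr >> PAGE_SHIFT] = slot | 0x80;
}

int main(void)
{
	/* Record slot 5 for the page at physical address 3 << PAGE_SHIFT. */
	hash_debug_pagealloc_add_slot(3UL << PAGE_SHIFT, 5);

	/* Decode: high bit = valid, low 7 bits = slot number. */
	uint8_t entry = linear_map_hash_slots[3];
	printf("valid=%d slot=%d\n", !!(entry & 0x80), entry & 0x7f);
	return 0;	/* prints: valid=1 slot=5 */
}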