diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index fab124ada1c7f..f3a9476a71b3a 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -10,6 +10,7 @@
 
 #include <linux/mm.h>
 #include <asm/pgtable.h>
+#include <asm/mmu.h>
 
 #ifdef CONFIG_PPC64_ELF_ABI_V1
 #define ARCH_FUNC_PREFIX "."
@@ -25,6 +26,10 @@ static inline void disable_kfence(void)
 
 static inline bool arch_kfence_init_pool(void)
 {
+#ifdef CONFIG_PPC64
+	if (!radix_enabled())
+		return false;
+#endif
 	return !kfence_disabled;
 }
 #endif
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index e1eadd03f1339..296bb74dbf400 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -431,7 +431,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled_or_kfence() &&
+		if (debug_pagealloc_enabled() &&
 		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 	}
@@ -814,7 +814,7 @@ static void __init htab_init_page_sizes(void)
 	bool aligned = true;
 	init_hpte_page_sizes();
 
-	if (!debug_pagealloc_enabled_or_kfence()) {
+	if (!debug_pagealloc_enabled()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
@@ -1134,7 +1134,7 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled_or_kfence()) {
+	if (debug_pagealloc_enabled()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -2120,7 +2120,7 @@ void hpt_do_stress(unsigned long ea, unsigned long hpte_group)
 	}
 }
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_DEBUG_PAGEALLOC
 static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2194,7 +2194,13 @@ int hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	local_irq_restore(flags);
 	return 0;
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
+#else /* CONFIG_DEBUG_PAGEALLOC */
+int hash__kernel_map_pages(struct page *page, int numpages,
+			   int enable)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				      phys_addr_t first_memblock_size)
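
Note on the final hash_utils.c hunk: it uses the common "#else stub" idiom, so that when
CONFIG_DEBUG_PAGEALLOC is off, hash__kernel_map_pages() still exists as a no-op with the
same signature and call sites need no #ifdef guards of their own. Below is a minimal,
standalone sketch of that idiom, compiled outside the kernel tree; FEATURE_X and
do_map_pages() are hypothetical names chosen for illustration, not kernel symbols.

/*
 * Sketch of the "#else stub" idiom: when the feature is compiled out,
 * keep a same-signature no-op so callers compile unconditionally.
 * FEATURE_X and do_map_pages() are hypothetical, not kernel APIs.
 */
#include <stdio.h>

#ifdef FEATURE_X
static int do_map_pages(void *page, int numpages, int enable)
{
	/* the real (un)mapping work would live here */
	printf("map %p: %d page(s), enable=%d\n", page, numpages, enable);
	return 0;
}
#else /* FEATURE_X */
static int do_map_pages(void *page, int numpages, int enable)
{
	(void)page;
	(void)numpages;
	(void)enable;
	return 0;	/* feature off: deliberate no-op */
}
#endif /* FEATURE_X */

int main(void)
{
	char buf[4096];

	/* builds and runs whether or not -DFEATURE_X was passed */
	return do_map_pages(buf, 1, 1);
}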