book3s64/hash: Add kfence functionality
Now that the linear map functionality of debug_pagealloc has been made
generic, enable kfence to use this generic infrastructure.

1. Define kfence related linear map variables.
   - u8 *linear_map_kf_hash_slots;
   - unsigned long linear_map_kf_hash_count;
   - DEFINE_RAW_SPINLOCK(linear_map_kf_hash_lock);
2. The linear map array allocated in the RMA region is quite small:
   KFENCE_POOL_SIZE >> PAGE_SHIFT bytes, which is 512 bytes by default.
3. The kfence pool memory itself is reserved using memblock_phys_alloc(),
   so it can come from anywhere in memory.
   (default 255 objects => ((1+255) * 2) << PAGE_SHIFT = 32MB)
   See the worked-out sizes below this list.
4. The hash slot information for kfence memory gets added in linear map
   in hash_linear_map_add_slot() (which also adds for debug_pagealloc).
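
For reference, the arithmetic behind the sizes quoted in points 2 and 3,
assuming the default CONFIG_KFENCE_NUM_OBJECTS=255 and a 64K page size
(PAGE_SHIFT = 16), which is what the numbers above correspond to:

   KFENCE_POOL_SIZE        = ((255 + 1) * 2) << PAGE_SHIFT
                           = 512 pages * 64K = 32MB   (can come from anywhere)
   linear map slots array  = KFENCE_POOL_SIZE >> PAGE_SHIFT
                           = 512 u8 entries  = 512 bytes (allocated in the RMA)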

Reported-by: Pavithra Prakash <pavrampu@linux.vnet.ibm.com>
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/5c2b61941b344077a2b8654dab46efa0322af3af.1729271995.git.ritesh.list@gmail.com
Ritesh Harjani (IBM) authored and Michael Ellerman committed Oct 23, 2024
1 parent 47dd2e6 commit 8fec58f
Showing 2 changed files with 149 additions and 18 deletions.
5 changes: 0 additions & 5 deletions arch/powerpc/include/asm/kfence.h
@@ -10,7 +10,6 @@

#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>

#ifdef CONFIG_PPC64_ELF_ABI_V1
#define ARCH_FUNC_PREFIX "."
@@ -26,10 +25,6 @@ static inline void disable_kfence(void)

static inline bool arch_kfence_init_pool(void)
{
#ifdef CONFIG_PPC64
if (!radix_enabled())
return false;
#endif
return !kfence_disabled;
}
#endif
162 changes: 149 additions & 13 deletions arch/powerpc/mm/book3s64/hash_utils.c
@@ -40,6 +40,7 @@
#include <linux/random.h>
#include <linux/elf-randomize.h>
#include <linux/of_fdt.h>
#include <linux/kfence.h>

#include <asm/interrupt.h>
#include <asm/processor.h>
@@ -66,6 +67,7 @@
#include <asm/pte-walk.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor.h>
#include <asm/kfence.h>

#include <mm/mmu_decl.h>

@@ -271,7 +273,7 @@ void hash__tlbiel_all(unsigned int action)
WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static void kernel_map_linear_page(unsigned long vaddr, unsigned long idx,
u8 *slots, raw_spinlock_t *lock)
{
@@ -325,11 +327,13 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx,
mmu_linear_psize,
mmu_kernel_ssize, 0);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
static inline void hash_debug_pagealloc_alloc_slots(void)
static void hash_debug_pagealloc_alloc_slots(void)
{
unsigned long max_hash_count = ppc64_rma_size / 4;

@@ -352,7 +356,8 @@ static inline void hash_debug_pagealloc_alloc_slots(void)
__func__, linear_map_hash_count, &ppc64_rma_size);
}

static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot)
static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr,
int slot)
{
if (!debug_pagealloc_enabled() || !linear_map_hash_count)
return;
@@ -386,20 +391,148 @@ static int hash_debug_pagealloc_map_pages(struct page *page, int numpages,
return 0;
}

int hash__kernel_map_pages(struct page *page, int numpages, int enable)
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void hash_debug_pagealloc_alloc_slots(void) {}
static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot) {}
static int __maybe_unused
hash_debug_pagealloc_map_pages(struct page *page, int numpages, int enable)
{
return hash_debug_pagealloc_map_pages(page, numpages, enable);
return 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

#else /* CONFIG_DEBUG_PAGEALLOC */
int hash__kernel_map_pages(struct page *page, int numpages,
int enable)
#ifdef CONFIG_KFENCE
static u8 *linear_map_kf_hash_slots;
static unsigned long linear_map_kf_hash_count;
static DEFINE_RAW_SPINLOCK(linear_map_kf_hash_lock);

static phys_addr_t kfence_pool;

static inline void hash_kfence_alloc_pool(void)
{

/* allocate linear map for kfence within RMA region */
linear_map_kf_hash_count = KFENCE_POOL_SIZE >> PAGE_SHIFT;
linear_map_kf_hash_slots = memblock_alloc_try_nid(
linear_map_kf_hash_count, 1,
MEMBLOCK_LOW_LIMIT, ppc64_rma_size,
NUMA_NO_NODE);
if (!linear_map_kf_hash_slots) {
pr_err("%s: memblock for linear map (%lu) failed\n", __func__,
linear_map_kf_hash_count);
goto err;
}

/* allocate kfence pool early */
kfence_pool = memblock_phys_alloc_range(KFENCE_POOL_SIZE, PAGE_SIZE,
MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ANYWHERE);
if (!kfence_pool) {
pr_err("%s: memblock for kfence pool (%lu) failed\n", __func__,
KFENCE_POOL_SIZE);
memblock_free(linear_map_kf_hash_slots,
linear_map_kf_hash_count);
linear_map_kf_hash_count = 0;
goto err;
}
memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);

return;
err:
pr_info("Disabling kfence\n");
disable_kfence();
}

static inline void hash_kfence_map_pool(void)
{
unsigned long kfence_pool_start, kfence_pool_end;
unsigned long prot = pgprot_val(PAGE_KERNEL);

if (!kfence_pool)
return;

kfence_pool_start = (unsigned long) __va(kfence_pool);
kfence_pool_end = kfence_pool_start + KFENCE_POOL_SIZE;
__kfence_pool = (char *) kfence_pool_start;
BUG_ON(htab_bolt_mapping(kfence_pool_start, kfence_pool_end,
kfence_pool, prot, mmu_linear_psize,
mmu_kernel_ssize));
memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
}

static inline void hash_kfence_add_slot(phys_addr_t paddr, int slot)
{
unsigned long vaddr = (unsigned long) __va(paddr);
unsigned long lmi = (vaddr - (unsigned long)__kfence_pool)
>> PAGE_SHIFT;

if (!kfence_pool)
return;
BUG_ON(!is_kfence_address((void *)vaddr));
BUG_ON(lmi >= linear_map_kf_hash_count);
linear_map_kf_hash_slots[lmi] = slot | 0x80;
}

static int hash_kfence_map_pages(struct page *page, int numpages, int enable)
{
unsigned long flags, vaddr, lmi;
int i;

WARN_ON_ONCE(!linear_map_kf_hash_count);
local_irq_save(flags);
for (i = 0; i < numpages; i++, page++) {
vaddr = (unsigned long)page_address(page);
lmi = (vaddr - (unsigned long)__kfence_pool) >> PAGE_SHIFT;

/* Ideally this should never happen */
if (lmi >= linear_map_kf_hash_count) {
WARN_ON_ONCE(1);
continue;
}

if (enable)
kernel_map_linear_page(vaddr, lmi,
linear_map_kf_hash_slots,
&linear_map_kf_hash_lock);
else
kernel_unmap_linear_page(vaddr, lmi,
linear_map_kf_hash_slots,
&linear_map_kf_hash_lock);
}
local_irq_restore(flags);
return 0;
}
static inline void hash_debug_pagealloc_alloc_slots(void) {}
static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
#else
static inline void hash_kfence_alloc_pool(void) {}
static inline void hash_kfence_map_pool(void) {}
static inline void hash_kfence_add_slot(phys_addr_t paddr, int slot) {}
static int __maybe_unused
hash_kfence_map_pages(struct page *page, int numpages, int enable)
{
return 0;
}
#endif

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
int hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
void *vaddr = page_address(page);

if (is_kfence_address(vaddr))
return hash_kfence_map_pages(page, numpages, enable);
else
return hash_debug_pagealloc_map_pages(page, numpages, enable);
}

static void hash_linear_map_add_slot(phys_addr_t paddr, int slot)
{
if (is_kfence_address(__va(paddr)))
hash_kfence_add_slot(paddr, slot);
else
hash_debug_pagealloc_add_slot(paddr, slot);
}
#else
static void hash_linear_map_add_slot(phys_addr_t paddr, int slot) {}
#endif

/*
* 'R' and 'C' update notes:
@@ -559,7 +692,8 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
break;

cond_resched();
hash_debug_pagealloc_add_slot(paddr, ret);
/* add slot info in debug_pagealloc / kfence linear map */
hash_linear_map_add_slot(paddr, ret);
}
return ret < 0 ? ret : 0;
}
@@ -940,7 +1074,7 @@ static void __init htab_init_page_sizes(void)
bool aligned = true;
init_hpte_page_sizes();

if (!debug_pagealloc_enabled()) {
if (!debug_pagealloc_enabled_or_kfence()) {
/*
* Pick a size for the linear mapping. Currently, we only
* support 16M, 1M and 4K which is the default
@@ -1261,6 +1395,7 @@ static void __init htab_initialize(void)
prot = pgprot_val(PAGE_KERNEL);

hash_debug_pagealloc_alloc_slots();
hash_kfence_alloc_pool();
/* create bolted the linear mapping in the hash table */
for_each_mem_range(i, &base, &end) {
size = end - base;
@@ -1277,6 +1412,7 @@ static void __init htab_initialize(void)
BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
prot, mmu_linear_psize, mmu_kernel_ssize));
}
hash_kfence_map_pool();
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

/*
