book3s64/hash: Make kernel_map_linear_page() generic
Currently kernel_map_linear_page() assumes it is working on the
linear_map_hash_slots array. Since later patches need a separate
linear map array for kfence, make kernel_map_linear_page() take the
linear map array and its lock as function arguments.

This is needed to separate kfence from the debug_pagealloc
infrastructure.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/5b67df7b29e68d7c78d6fc1f42d41137299bac6b.1729271995.git.ritesh.list@gmail.com
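
For readers new to this code: each u8 entry in the slots array packs a
"mapped" flag into bit 7 and the value returned by hpte_insert_repeating()
into the low 7 bits, which is why the diff below tests 0x80 and masks with
0x7f. A minimal sketch of that encoding (the macro and helper names are
illustrative, not part of the kernel source):

    /* Illustrative names only; hash_utils.c open-codes these masks. */
    #define SLOT_MAPPED	0x80	/* bit 7: page currently has a bolted HPTE */
    #define SLOT_VAL_MASK	0x7f	/* low 7 bits: slot info kept for invalidation */

    static inline u8 slot_encode(long ret)     { return (u8)ret | SLOT_MAPPED; }
    static inline bool slot_is_mapped(u8 entry) { return entry & SLOT_MAPPED; }
    static inline u8 slot_value(u8 entry)      { return entry & SLOT_VAL_MASK; }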
Ritesh Harjani (IBM) authored and Michael Ellerman committed Oct 23, 2024
1 parent 43919f4 commit 685d942
Showing 1 changed file with 25 additions and 22 deletions.
arch/powerpc/mm/book3s64/hash_utils.c
@@ -272,11 +272,8 @@ void hash__tlbiel_all(unsigned int action)
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static u8 *linear_map_hash_slots;
-static unsigned long linear_map_hash_count;
-static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
-
-static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
+static void kernel_map_linear_page(unsigned long vaddr, unsigned long idx,
+                                   u8 *slots, raw_spinlock_t *lock)
 {
         unsigned long hash;
         unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
@@ -290,44 +287,48 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
         if (!vsid)
                 return;
 
-        if (linear_map_hash_slots[lmi] & 0x80)
+        if (slots[idx] & 0x80)
                 return;
 
         ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
                                     HPTE_V_BOLTED,
                                     mmu_linear_psize, mmu_kernel_ssize);
 
         BUG_ON (ret < 0);
-        raw_spin_lock(&linear_map_hash_lock);
-        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
-        linear_map_hash_slots[lmi] = ret | 0x80;
-        raw_spin_unlock(&linear_map_hash_lock);
+        raw_spin_lock(lock);
+        BUG_ON(slots[idx] & 0x80);
+        slots[idx] = ret | 0x80;
+        raw_spin_unlock(lock);
 }
 
-static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
+static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx,
+                                     u8 *slots, raw_spinlock_t *lock)
 {
-        unsigned long hash, hidx, slot;
+        unsigned long hash, hslot, slot;
         unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
         unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
 
         hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
-        raw_spin_lock(&linear_map_hash_lock);
-        if (!(linear_map_hash_slots[lmi] & 0x80)) {
-                raw_spin_unlock(&linear_map_hash_lock);
+        raw_spin_lock(lock);
+        if (!(slots[idx] & 0x80)) {
+                raw_spin_unlock(lock);
                 return;
         }
-        hidx = linear_map_hash_slots[lmi] & 0x7f;
-        linear_map_hash_slots[lmi] = 0;
-        raw_spin_unlock(&linear_map_hash_lock);
-        if (hidx & _PTEIDX_SECONDARY)
+        hslot = slots[idx] & 0x7f;
+        slots[idx] = 0;
+        raw_spin_unlock(lock);
+        if (hslot & _PTEIDX_SECONDARY)
                 hash = ~hash;
         slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-        slot += hidx & _PTEIDX_GROUP_IX;
+        slot += hslot & _PTEIDX_GROUP_IX;
         mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
                                      mmu_linear_psize,
                                      mmu_kernel_ssize, 0);
 }
 
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
+static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 static inline void hash_debug_pagealloc_alloc_slots(void)
 {
         if (!debug_pagealloc_enabled())
@@ -362,9 +363,11 @@ static int hash_debug_pagealloc_map_pages(struct page *page, int numpages,
                 if (lmi >= linear_map_hash_count)
                         continue;
                 if (enable)
-                        kernel_map_linear_page(vaddr, lmi);
+                        kernel_map_linear_page(vaddr, lmi,
+                                linear_map_hash_slots, &linear_map_hash_lock);
                 else
-                        kernel_unmap_linear_page(vaddr, lmi);
+                        kernel_unmap_linear_page(vaddr, lmi,
+                                linear_map_hash_slots, &linear_map_hash_lock);
         }
         local_irq_restore(flags);
         return 0;
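
With the array and lock passed in explicitly, a later user such as kfence
can drive the same helpers with its own state. A sketch of what such a
caller might look like (the kfence-side variable and function names below
are assumptions based on the commit message, not code from this series):

    /* Hypothetical kfence-side state, mirroring the debug_pagealloc variables. */
    static u8 *linear_map_kf_hash_slots;
    static unsigned long linear_map_kf_hash_count;
    static DEFINE_RAW_SPINLOCK(linear_map_kf_hash_lock);

    static void kfence_toggle_linear_page(unsigned long vaddr, unsigned long idx,
                                          bool enable)
    {
            /* Bounds-check against the kfence pool's own slot array. */
            if (idx >= linear_map_kf_hash_count)
                    return;
            if (enable)
                    kernel_map_linear_page(vaddr, idx,
                            linear_map_kf_hash_slots, &linear_map_kf_hash_lock);
            else
                    kernel_unmap_linear_page(vaddr, idx,
                            linear_map_kf_hash_slots, &linear_map_kf_hash_lock);
    }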
