book3s64/hash: Refactor kernel linear map related calls
This just brings all linear map related handling to one place instead of
having those functions scattered through the hash_utils file, which
makes the change easier to review.

No functional changes in this patch.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/56c610310aa50b5417976a39c5f15b78bc76c764.1729271995.git.ritesh.list@gmail.com
Ritesh Harjani (IBM) authored and Michael Ellerman committed Oct 23, 2024
1 parent 47780e7 commit 8b10855
Showing 1 changed file with 82 additions and 82 deletions.
arch/powerpc/mm/book3s64/hash_utils.c: 164 changes (82 additions, 82 deletions)
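
For context: with CONFIG_DEBUG_PAGEALLOC enabled, the core page allocator calls the architecture's __kernel_map_pages() hook as pages are allocated and freed, and on Book3S-64 that hook dispatches to hash__kernel_map_pages() when the hash MMU is active (or to the radix variant otherwise). The following is only a minimal sketch of that dispatch, assuming the usual radix_enabled()/radix__kernel_map_pages() names rather than quoting the powerpc headers verbatim:

/*
 * Simplified sketch (not verbatim kernel code) of how the generic
 * DEBUG_PAGEALLOC hook reaches the function moved by this patch.
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);	/* radix MMU */
	else
		hash__kernel_map_pages(page, numpages, enable);		/* hash MMU, this file */
}
#endif

Mapping a page (enable != 0) re-inserts a bolted HPTE for it; unmapping tears the HPTE down so that stray accesses to freed pages fault immediately.
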
@@ -273,6 +273,88 @@ void hash__tlbiel_all(unsigned int action)
		WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);

static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
	long ret;

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

	/* Don't create HPTE entries for bad address */
	if (!vsid)
		return;

	if (linear_map_hash_slots[lmi] & 0x80)
		return;

	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
				    HPTE_V_BOLTED,
				    mmu_linear_psize, mmu_kernel_ssize);

	BUG_ON (ret < 0);
	raw_spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	raw_spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
	raw_spin_lock(&linear_map_hash_lock);
	if (!(linear_map_hash_slots[lmi] & 0x80)) {
		raw_spin_unlock(&linear_map_hash_lock);
		return;
	}
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	raw_spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
				     mmu_linear_psize,
				     mmu_kernel_ssize, 0);
}

int hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
	return 0;
}
#else /* CONFIG_DEBUG_PAGEALLOC */
int hash__kernel_map_pages(struct page *page, int numpages,
			   int enable)
{
	return 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * 'R' and 'C' update notes:
 * - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
@@ -2120,88 +2202,6 @@ void hpt_do_stress(unsigned long ea, unsigned long hpte_group)
}
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);

static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
	long ret;

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);

	/* Don't create HPTE entries for bad address */
	if (!vsid)
		return;

	if (linear_map_hash_slots[lmi] & 0x80)
		return;

	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
				    HPTE_V_BOLTED,
				    mmu_linear_psize, mmu_kernel_ssize);

	BUG_ON (ret < 0);
	raw_spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	raw_spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
	raw_spin_lock(&linear_map_hash_lock);
	if (!(linear_map_hash_slots[lmi] & 0x80)) {
		raw_spin_unlock(&linear_map_hash_lock);
		return;
	}
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	raw_spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
				     mmu_linear_psize,
				     mmu_kernel_ssize, 0);
}

int hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
	return 0;
}
#else /* CONFIG_DEBUG_PAGEALLOC */
int hash__kernel_map_pages(struct page *page, int numpages,
			   int enable)
{
	return 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				      phys_addr_t first_memblock_size)
{
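
The moved code keeps one byte per tracked linear-map page in linear_map_hash_slots[]: bit 0x80 marks the page as currently mapped, and the low seven bits store the hidx returned by hpte_insert_repeating(), which kernel_unmap_linear_page() later combines with the recomputed hash (via _PTEIDX_SECONDARY and _PTEIDX_GROUP_IX) to locate the HPTE slot to invalidate. Below is a small standalone illustration of that byte encoding, using hypothetical helper names in user-space C rather than kernel code:

#include <assert.h>
#include <stdio.h>

/* Same bit layout as the linear_map_hash_slots[] byte used above. */
#define LM_SLOT_VALID	0x80	/* page currently has a bolted HPTE */
#define LM_SLOT_MASK	0x7f	/* hidx from hpte_insert_repeating() */

static unsigned char lm_encode(long hidx)	{ return (unsigned char)(hidx | LM_SLOT_VALID); }
static int lm_is_mapped(unsigned char entry)	{ return entry & LM_SLOT_VALID; }
static long lm_decode_hidx(unsigned char entry)	{ return entry & LM_SLOT_MASK; }

int main(void)
{
	/* kernel_map_linear_page() stores "hidx | 0x80" ... */
	unsigned char entry = lm_encode(5);
	assert(lm_is_mapped(entry));

	/* ... and kernel_unmap_linear_page() masks the hidx back out. */
	assert(lm_decode_hidx(entry) == 5);
	printf("entry=0x%02x hidx=%ld\n", (unsigned)entry, lm_decode_hidx(entry));
	return 0;
}

Packing the valid bit and the slot index into a single byte keeps the tracking array at one byte per linear-map page.
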
