powerpc/mm: don't use pte_alloc_kernel() until slab is available on PPC32

In the same way as PPC64, implement early allocation functions and
avoid calling pte_alloc_kernel() before slab is available.

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Christophe Leroy authored and Michael Ellerman committed May 2, 2019
1 parent 627f06c commit 4a6d8cf
Showing 1 changed file with 28 additions and 6 deletions.
arch/powerpc/mm/pgtable_32.c (34 changes: 28 additions & 6 deletions)
@@ -43,11 +43,8 @@ EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	if (!slab_is_available())
-		return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
-
 	return (pte_t *)pte_fragment_alloc(mm, 1);
 }
 
@@ -205,7 +202,29 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+static void __init *early_alloc_pgtable(unsigned long size)
+{
+	void *ptr = memblock_alloc(size, size);
+
+	if (!ptr)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, size, size);
+
+	return ptr;
+}
+
+static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+{
+	if (pmd_none(*pmdp)) {
+		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
+
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
+	}
+	return pte_offset_kernel(pmdp, va);
+}
+
+
+int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
 	pmd_t *pd;
 	pte_t *pg;
@@ -214,7 +233,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	/* Use upper 10 bits of VA to index the first level map */
 	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
-	pg = pte_alloc_kernel(pd, va);
+	if (likely(slab_is_available()))
+		pg = pte_alloc_kernel(pd, va);
+	else
+		pg = early_pte_alloc_kernel(pd, va);
 	if (pg != 0) {
 		err = 0;
 		/* The PTE should never be already set nor present in the
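The constraint the patch works around is that pte_alloc_kernel() needs the slab allocator, which is not yet up when the kernel creates its earliest mappings, so those allocations must come from memblock instead. The self-contained userspace sketch below only illustrates that two-phase pattern; it is not kernel code, and fake_memblock_alloc(), alloc_page_table(), allocator_ready and early_pool are hypothetical stand-ins for memblock_alloc(), the pte_alloc_kernel()/early_pte_alloc_kernel() pair and slab_is_available().

/*
 * Hypothetical userspace sketch of the pattern in this patch: use a static
 * early pool until the regular allocator is ready, then switch over.
 * Nothing here is kernel code; all names are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EARLY_POOL_SIZE 4096

static unsigned char early_pool[EARLY_POOL_SIZE];	/* stands in for memblock memory */
static size_t early_used;
static bool allocator_ready;				/* stands in for slab_is_available() */

/* Bump allocator used before the "real" allocator exists (memblock_alloc() stand-in). */
static void *fake_memblock_alloc(size_t size)
{
	void *ptr;

	if (early_used + size > EARLY_POOL_SIZE)
		return NULL;
	ptr = early_pool + early_used;
	early_used += size;
	memset(ptr, 0, size);
	return ptr;
}

/* Mirrors the slab_is_available() check added to map_kernel_page() by this patch. */
static void *alloc_page_table(size_t size)
{
	if (allocator_ready)
		return calloc(1, size);		/* normal path, like pte_alloc_kernel() */
	return fake_memblock_alloc(size);	/* early path, like early_pte_alloc_kernel() */
}

int main(void)
{
	void *early = alloc_page_table(256);	/* served from the static early pool */
	void *late;

	allocator_ready = true;			/* "slab" becomes available */
	late = alloc_page_table(256);		/* served from the heap */

	printf("early=%p late=%p\n", early, late);
	free(late);
	return 0;
}

As in the patch, callers of the mapping helper do not need to know which boot phase they are in; the allocation routine picks the appropriate backend itself.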
