powerpc/8xx: Map linear memory with huge pages
Map linear memory space with 512k and 8M pages whenever
possible.

Three mappings are performed:
- One for kernel text
- One for RO data
- One for the rest

The mappings are kept separate so that their protection can be
updated later when STRICT_KERNEL_RWX is used.

The ITLB miss handler now needs to also handle huge TLBs,
unless kernel text is pinned.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c44f0ab5510474f25123d904cd1f4e5c6aa3c1ac.1589866984.git.christophe.leroy@csgroup.eu
Christophe Leroy authored and Michael Ellerman committed May 26, 2020
1 parent a623bb5 commit cf20995
Showing 2 changed files with 51 additions and 3 deletions.
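Editorial note: the core of this change is the new helper mmu_mapin_ram_chunk() added below, which covers a range with the largest pages that fit: 512k pages up to the first 8M boundary, 8M pages across the aligned middle, then 512k pages for the tail. A minimal standalone sketch of that split follows; the SZ_* values match the kernel's, but map_chunk() and the example range are illustrative only.

#include <stdio.h>

#define SZ_512K 0x00080000UL
#define SZ_8M   0x00800000UL

#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

static void map_chunk(unsigned long offset, unsigned long top)
{
	unsigned long p = offset;

	/* 512K pages up to the first 8M boundary */
	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K)
		printf("512K page at 0x%08lx\n", p);
	/* 8M pages across the 8M-aligned middle */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
		printf("  8M page at 0x%08lx\n", p);
	/* 512K pages for the unaligned tail */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
		printf("512K page at 0x%08lx\n", p);
}

int main(void)
{
	/* 0x00500000..0x01B00000: six 512K head pages,
	 * two 8M pages, then six 512K tail pages */
	map_chunk(0x00500000, 0x01B00000);
	return 0;
}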
arch/powerpc/kernel/head_8xx.S (2 additions, 2 deletions)
@@ -224,7 +224,7 @@ InstructionTLBMiss:
 3:
 	mtcr	r11
 #endif
-#ifdef CONFIG_HUGETLBFS
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
 	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */
 	mtspr	SPRN_MD_TWC, r11
 #else
@@ -234,7 +234,7 @@ InstructionTLBMiss:
 #endif
 	mfspr	r10, SPRN_MD_TWC
 	lwz	r10, 0(r10)	/* Get the pte */
-#ifdef CONFIG_HUGETLBFS
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
 	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
 	mtspr	SPRN_MI_TWC, r11
 #endif
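Editorial note: the new ITLB path relies on the rlwimi in the hunk above to copy the PTE's 512k page-size bit into the value written to SPRN_MI_TWC. A rough C model of that one instruction; the bit values are assumed from pte-8xx.h (_PAGE_HUGE = 0x0800, _PMD_PAGE_512K = 0x0004) and should be checked against the tree.

#include <stdint.h>
#include <stdio.h>

/* Bit values assumed from pte-8xx.h, for illustration only. */
#define _PAGE_HUGE     0x00000800u  /* PTE: this is a 512k page */
#define _PMD_PAGE_512K 0x00000004u  /* TWC: page size is 512k */

/* rlwimi rA, rS, SH, mask: rotate rS left by SH (0 < SH < 32),
 * insert the bits selected by mask into rA, keep the rest of rA. */
static uint32_t rlwimi(uint32_t ra, uint32_t rs, unsigned int sh, uint32_t mask)
{
	uint32_t rot = (rs << sh) | (rs >> (32 - sh));

	return (ra & ~mask) | (rot & mask);
}

int main(void)
{
	uint32_t pte = _PAGE_HUGE;  /* r10: PTE of a 512k page */
	uint32_t twc = 0;           /* r11: level-1 value for MI_TWC */

	/* the instruction from the hunk above:
	 * rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K */
	twc = rlwimi(twc, pte, 32 - 9, _PMD_PAGE_512K);
	printf("MI_TWC page-size bit: 0x%08x\n", twc & _PMD_PAGE_512K);
	return 0;
}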
arch/powerpc/mm/nohash/8xx.c (49 additions, 1 deletion)
@@ -127,20 +127,68 @@ void __init mmu_mapin_immr(void)
 				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
 }
 
+static void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+				       pgprot_t prot, bool new)
+{
+	unsigned long v = PAGE_OFFSET + offset;
+	unsigned long p = offset;
+
+	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+
+	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
+		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
+		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
+		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+
+	if (!new)
+		flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
+}
+
 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
+	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+	unsigned long sinittext = __pa(_sinittext);
+	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+	WARN_ON(top < einittext8);
+
 	mmu_mapin_immr();
 
-	return 0;
+	if (__map_without_ltlbs)
+		return 0;
+
+	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+	mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
+
+	if (top > SZ_32M)
+		memblock_set_current_limit(top);
+
+	block_mapped_ram = top;
+
+	return top;
 }
 
 void mmu_mark_initmem_nx(void)
 {
+	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+	unsigned long sinittext = __pa(_sinittext);
+	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
+	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mmu_mark_rodata_ro(void)
 {
+	unsigned long sinittext = __pa(_sinittext);
+
+	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
 }
 #endif
 
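Editorial note: taken together, mmu_mapin_ram() above carves the linear mapping into the three regions from the commit message. A small sketch of the boundary arithmetic with made-up addresses; the symbols and layout are hypothetical, only the boundary logic mirrors the code above.

#include <stdio.h>

#define SZ_8M 0x00800000UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* hypothetical layout; real values come from the linker script */
	unsigned long etext = 0x00654321;      /* __pa(_etext) */
	unsigned long sinittext = 0x00700000;  /* __pa(_sinittext) */
	unsigned long einittext = 0x00740000;  /* __pa(_einittext) */
	unsigned long top = 0x02000000;        /* 32M of RAM */
	int strict_rwx = 1;                    /* strict_kernel_rwx_enabled() */

	unsigned long etext8 = ALIGN(etext, SZ_8M);
	unsigned long boundary = strict_rwx ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(einittext, SZ_8M);

	printf("kernel text, PAGE_KERNEL_TEXT: 0x%08lx..0x%08lx\n", 0UL, boundary);
	printf("init text,   PAGE_KERNEL_TEXT: 0x%08lx..0x%08lx\n", boundary, einittext8);
	printf("the rest,    PAGE_KERNEL:      0x%08lx..0x%08lx\n", einittext8, top);
	return 0;
}

As the diff shows, mmu_mark_initmem_nx() later remaps everything between the boundary and einittext8 with PAGE_KERNEL, and with STRICT_KERNEL_RWX, mmu_mark_rodata_ro() remaps everything below sinittext with PAGE_KERNEL_ROX; keeping the three chunks separate is what makes those later protection updates possible.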
