powerpc/mm: Move book3s64 specifics in subdirectory mm/book3s64
Many files in arch/powerpc/mm are only for book3s64. This patch
creates a subdirectory for them.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Update the selftest sym links, shorten new filenames, cleanup some
      whitespace and formatting in the new files.]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Christophe Leroy authored and Michael Ellerman committed May 2, 2019
1 parent 9d9f2cc commit 47d9994
Showing 24 changed files with 126 additions and 92 deletions.
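
The patch relies on a standard kbuild idiom: listing a directory name with a trailing slash in an obj- goal makes kbuild descend into that directory and process its own Makefile. A minimal sketch of the pattern, using illustrative placeholders (CONFIG_FOO, subdir/ and the object names are not from this commit):

    # Parent Makefile: when CONFIG_FOO is enabled, kbuild descends into
    # subdir/ and processes subdir/Makefile.
    obj-$(CONFIG_FOO) += subdir/

    # subdir/Makefile: objects listed here are built into the kernel just
    # as if they had been listed in the parent Makefile.
    obj-y += foo_core.o foo_helpers.o

This is exactly the shape of the change below: the book3s64-only objects move out of arch/powerpc/mm/Makefile into the new arch/powerpc/mm/book3s64/Makefile, reached via obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/.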
arch/powerpc/mm/Makefile (25 changes: 3 additions & 22 deletions)
@@ -5,53 +5,34 @@
 
 ccflags-$(CONFIG_PPC64)        := $(NO_MINIMAL_TOC)
 
-CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-
 obj-y                          := fault.o mem.o pgtable.o mmap.o \
                                   init_$(BITS).o pgtable_$(BITS).o \
                                   init-common.o mmu_context.o drmem.o
 obj-$(CONFIG_PPC_MMU_NOHASH)   += mmu_context_nohash.o tlb_nohash.o \
                                   tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)       += tlb_low_$(BITS)e.o
-hash64-$(CONFIG_PPC_NATIVE)    := hash_native_64.o
 obj-$(CONFIG_PPC_BOOK3E_64)    += pgtable-book3e.o
-obj-$(CONFIG_PPC_BOOK3S_64)    += pgtable-hash64.o hash_utils_64.o slb.o \
-                                  $(hash64-y) mmu_context_book3s64.o \
-                                  pgtable-book3s64.o pgtable-frag.o
+obj-$(CONFIG_PPC_BOOK3S_64)    += book3s64/
+obj-$(CONFIG_PPC_BOOK3S_64)    += pgtable-frag.o
 obj-$(CONFIG_PPC32)            += pgtable-frag.o
-obj-$(CONFIG_PPC_RADIX_MMU)    += pgtable-radix.o tlb-radix.o
 obj-$(CONFIG_PPC_BOOK3S_32)    += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
-obj-$(CONFIG_PPC_BOOK3S)       += tlb_hash$(BITS).o
-ifdef CONFIG_PPC_BOOK3S_64
-obj-$(CONFIG_PPC_4K_PAGES)     += hash64_4k.o
-obj-$(CONFIG_PPC_64K_PAGES)    += hash64_64k.o
-endif
+obj-$(CONFIG_PPC_BOOK3S_32)    += tlb_hash32.o
 obj-$(CONFIG_40x)              += 40x_mmu.o
 obj-$(CONFIG_44x)              += 44x_mmu.o
 obj-$(CONFIG_PPC_8xx)          += 8xx_mmu.o
 obj-$(CONFIG_PPC_FSL_BOOK3E)   += fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
-obj-$(CONFIG_PPC_SPLPAR)       += vphn.o
 obj-$(CONFIG_PPC_MM_SLICES)    += slice.o
 obj-y                          += hugetlbpage.o
 ifdef CONFIG_HUGETLB_PAGE
-obj-$(CONFIG_PPC_BOOK3S_64)    += hugetlbpage-hash64.o
-obj-$(CONFIG_PPC_RADIX_MMU)    += hugetlbpage-radix.o
 obj-$(CONFIG_PPC_BOOK3E_MMU)   += hugetlbpage-book3e.o
 endif
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
-obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_HIGHMEM)          += highmem.o
 obj-$(CONFIG_PPC_COPRO_BASE)   += copro_fault.o
-obj-$(CONFIG_SPAPR_TCE_IOMMU)  += mmu_context_iommu.o
 obj-$(CONFIG_PPC_PTDUMP)       += ptdump/
-obj-$(CONFIG_PPC_MEM_KEYS)     += pkeys.o
 
 # Disable kcov instrumentation on sensitive code
 # This is necessary for booting with kcov enabled on book3e machines
 KCOV_INSTRUMENT_tlb_nohash.o := n
 KCOV_INSTRUMENT_fsl_booke_mmu.o := n
-
-# Instrumenting the SLB fault path can lead to duplicate SLB entries
-KCOV_INSTRUMENT_slb.o := n
arch/powerpc/mm/book3s64/Makefile (24 changes: 24 additions & 0 deletions)
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y                      := $(NO_MINIMAL_TOC)
+
+CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
+
+obj-y                          += hash_pgtable.o hash_utils.o slb.o \
+                                  mmu_context.o pgtable.o hash_tlb.o
+obj-$(CONFIG_PPC_NATIVE)       += hash_native.o
+obj-$(CONFIG_PPC_RADIX_MMU)    += radix_pgtable.o radix_tlb.o
+obj-$(CONFIG_PPC_4K_PAGES)     += hash_4k.o
+obj-$(CONFIG_PPC_64K_PAGES)    += hash_64k.o
+obj-$(CONFIG_PPC_SPLPAR)       += vphn.o
+obj-$(CONFIG_HUGETLB_PAGE)     += hash_hugetlbpage.o
+ifdef CONFIG_HUGETLB_PAGE
+obj-$(CONFIG_PPC_RADIX_MMU)    += radix_hugetlbpage.o
+endif
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
+obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
+obj-$(CONFIG_SPAPR_TCE_IOMMU)  += iommu_api.o
+obj-$(CONFIG_PPC_MEM_KEYS)     += pkeys.o
+
+# Instrumenting the SLB fault path can lead to duplicate SLB entries
+KCOV_INSTRUMENT_slb.o := n
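
Note that the per-object overrides travel with slb.o into the new directory: kbuild per-file variables such as CFLAGS_REMOVE_<object> and KCOV_INSTRUMENT_<object> only take effect in the Makefile of the directory that builds the object, which is why the ftrace-flag removal and the kcov opt-out reappear here rather than staying in arch/powerpc/mm/Makefile. A generic sketch of the mechanism (foo.o is an illustrative placeholder, not from this commit):

    # Strip the ftrace compile flags from foo.o only.
    CFLAGS_REMOVE_foo.o = $(CC_FLAGS_FTRACE)

    # Disable kcov instrumentation for foo.o only ...
    KCOV_INSTRUMENT_foo.o := n

    # ... or for every object built by this Makefile.
    KCOV_INSTRUMENT := n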
@@ -1,6 +1,6 @@
 /*
  * Copyright IBM Corporation, 2015
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU Lesser General Public License
@@ -1,6 +1,6 @@
 /*
  * Copyright IBM Corporation, 2015
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU Lesser General Public License
@@ -1,6 +1,6 @@
 /*
  * Copyright IBM Corporation, 2013
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2.1 of the GNU Lesser General Public License
@@ -34,7 +34,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        /* Search the Linux page table for a match with va */
        vpn = hpt_vpn(ea, vsid, ssize);
 
-       /* At this point, we have a pte (old_pte) which can be used to build
+       /*
+        * At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
@@ -55,8 +56,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        if (unlikely(!check_pte_access(access, old_pte)))
                return 1;
 
-       /* Try to lock the PTE, add ACCESSED and DIRTY if it was
-        * a write access */
+       /*
+        * Try to lock the PTE, add ACCESSED and DIRTY if it was
+        * a write access
+        */
        new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
        if (access & _PAGE_WRITE)
                new_pte |= _PAGE_DIRTY;
@@ -74,8 +77,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        rpte = __real_pte(__pte(old_pte), ptep, offset);
 
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-               /* No CPU has hugepages but lacks no execute, so we
-                * don't need to worry about that case */
+               /*
+                * No CPU has hugepages but lacks no execute, so we
+                * don't need to worry about that case
+                */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
        /* Check if pte already has an hpte (case 2) */
File renamed without changes.
File renamed without changes.
@@ -55,7 +55,8 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 
        i = batch->index;
 
-       /* Get page size (maybe move back to caller).
+       /*
+        * Get page size (maybe move back to caller).
         *
         * NOTE: when using special 64K mappings in 4K environment like
         * for SPEs, we obtain the page size from the slice, which thus
@@ -77,10 +78,12 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 #endif
        } else {
                psize = pte_pagesize_index(mm, addr, pte);
-               /* Mask the address for the standard page size. If we
+               /*
+                * Mask the address for the standard page size. If we
                 * have a 64k page kernel, but the hardware does not
                 * support 64k pages, this might be different from the
-                * hardware page size encoded in the slice table. */
+                * hardware page size encoded in the slice table.
+                */
                addr &= PAGE_MASK;
                offset = PTRS_PER_PTE;
        }
@@ -161,7 +164,8 @@ void hash__tlb_flush(struct mmu_gather *tlb)
 {
        struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
 
-       /* If there's a TLB batch pending, then we must flush it because the
+       /*
+        * If there's a TLB batch pending, then we must flush it because the
         * pages are going to be freed and we really don't want to have a CPU
         * access a freed page because it has a stale TLB
         */
@@ -201,7 +205,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 
        BUG_ON(!mm->pgd);
 
-       /* Note: Normally, we should only ever use a batch within a
+       /*
+        * Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
@@ -238,7 +243,8 @@ void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
        unsigned long flags;
 
        addr = _ALIGN_DOWN(addr, PMD_SIZE);
-       /* Note: Normally, we should only ever use a batch within a
+       /*
+        * Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
@@ -128,7 +128,8 @@ static DEFINE_SPINLOCK(linear_map_hash_lock);
 struct mmu_hash_ops mmu_hash_ops;
 EXPORT_SYMBOL(mmu_hash_ops);
 
-/* There are definitions of page sizes arrays to be used when none
+/*
+ * These are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
  */
 
@@ -145,7 +146,8 @@ static struct mmu_psize_def mmu_psize_defaults[] = {
        },
 };
 
-/* POWER4, GPUL, POWER5
+/*
+ * POWER4, GPUL, POWER5
  *
  * Support for 16Mb large pages
  */
@@ -479,7 +481,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-/* Scan for 16G memory blocks that have been set aside for huge pages
+/*
+ * Scan for 16G memory blocks that have been set aside for huge pages
  * and reserve those blocks for 16G huge pages.
  */
 static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
@@ -496,8 +499,10 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;
 
-       /* This property is the log base 2 of the number of virtual pages that
-        * will represent this memory block. */
+       /*
+        * This property is the log base 2 of the number of virtual pages that
+        * will represent this memory block.
+        */
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
@@ -673,7 +678,8 @@ static void __init htab_init_page_sizes(void)
 #endif /* CONFIG_PPC_64K_PAGES */
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-       /* We try to use 16M pages for vmemmap if that is supported
+       /*
+        * We try to use 16M pages for vmemmap if that is supported
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
@@ -742,7 +748,8 @@ unsigned htab_shift_for_mem_size(unsigned long mem_size)
 
 static unsigned long __init htab_get_table_size(void)
 {
-       /* If hash size isn't already provided by the platform, we try to
+       /*
+        * If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there neither, we
         * calculate it now based on the total RAM size
         */
@@ -1043,7 +1050,8 @@ void __init hash__early_init_mmu(void)
        if (!mmu_hash_ops.hpte_insert)
                panic("hash__early_init_mmu: No MMU hash ops defined!\n");
 
-       /* Initialize the MMU Hash table and create the linear mapping
+       /*
+        * Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before SLB initialization as this is
         * currently where the page size encoding is obtained.
         */
@@ -1228,7 +1236,8 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
        }
 }
 
-/* Result code is:
+/*
+ * Result code is:
  * 0 - handled
  * 1 - normal page fault
  * -1 - critical hash insertion error
@@ -1276,8 +1285,9 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                ssize = mmu_kernel_ssize;
                break;
        default:
-               /* Not a valid range
-                * Send the problem up to do_page_fault
+               /*
+                * Not a valid range
+                * Send the problem up to do_page_fault()
                 */
                rc = 1;
                goto bail;
@@ -1302,7 +1312,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                flags |= HPTE_LOCAL_UPDATE;
 
 #ifndef CONFIG_PPC_64K_PAGES
-       /* If we use 4K pages and our psize is not 4K, then we might
+       /*
+        * If we use 4K pages and our psize is not 4K, then we might
         * be hitting a special driver mapping, and need to align the
         * address before we fetch the PTE.
         *
@@ -1324,7 +1335,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
        /* Add _PAGE_PRESENT to the required access perm */
        access |= _PAGE_PRESENT;
 
-       /* Pre-check access permissions (will be re-checked atomically
+       /*
+        * Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path
         */
        if (!check_pte_access(access, pte_val(*ptep))) {
@@ -1371,7 +1383,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                psize = MMU_PAGE_4K;
        }
 
-       /* If this PTE is non-cacheable and we have restrictions on
+       /*
+        * If this PTE is non-cacheable and we have restrictions on
         * using non cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
@@ -1412,7 +1425,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                                            flags, ssize, spp);
        }
 
-       /* Dump some info in case of hash insertion failure, they should
+       /*
+        * Dump some info in case of hash insertion failure, they should
         * never happen so it is really useful to know if/when they do
         */
        if (rc == -1)
@@ -1653,7 +1667,8 @@ unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
        return gslot;
 }
 
-/* WARNING: This is called from hash_low_64.S, if you change this prototype,
+/*
+ * WARNING: This is called from hash_low_64.S, if you change this prototype,
  * do not forget to update the assembly call site !
  */
 void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
@@ -1874,7 +1889,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                      phys_addr_t first_memblock_size)
 {
-       /* We don't currently support the first MEMBLOCK not mapping 0
+       /*
+        * We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors
         */
        BUG_ON(first_memblock_base != 0);
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.