Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 161711
b: refs/heads/master
c: 32a7494
h: refs/heads/master
i:
  161709: ff99bfa
  161707: fe2775c
  161703: 587d1ea
  161695: e1ae089
v: v3
  • Loading branch information
Benjamin Herrenschmidt committed Aug 20, 2009
1 parent b8b2238 commit 1ac6c68
Show file tree
Hide file tree
Showing 7 changed files with 69 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 25d21ad6e799cccd097b9df2a2fefe19a7e1dfcf
refs/heads/master: 32a74949b7337726e76d69f51c48715431126c6c
1 change: 1 addition & 0 deletions trunk/arch/powerpc/include/asm/mmu-book3e.h
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,7 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
#endif

extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;

#endif /* !__ASSEMBLY__ */

Expand Down
3 changes: 2 additions & 1 deletion trunk/arch/powerpc/include/asm/pgtable-ppc64.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@
/*
* The vmalloc space starts at the beginning of that region, and
* occupies half of it on hash CPUs and a quarter of it on Book3E
* (we keep a quarter for the virtual memmap)
*/
#define VMALLOC_START KERN_VIRT_START
#ifdef CONFIG_PPC_BOOK3E
Expand Down Expand Up @@ -83,7 +84,7 @@

#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID (0xfUL)
#define VMEMMAP_REGION_ID (0xfUL) /* Server only */
#define USER_REGION_ID (0UL)

/*
Expand Down
55 changes: 48 additions & 7 deletions trunk/arch/powerpc/mm/init_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,47 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
*
* On Book3E CPUs, the vmemmap is currently mapped in the top half of
* the vmalloc space using normal page tables, though the size of
* pages encoded in the PTEs can be different
*/

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Base PTE flags; the page size encoding is folded in below */
	unsigned long offset;
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED |
			      _PAGE_KERNEL_RW;

	/* The PTE page-size field is 4 bits wide, so encodings above
	 * 0xf (i.e. page sizes beyond 32M) cannot be represented */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Fold the vmemmap page-size encoding into the PTE flags */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* Install one PTE per PAGE_SIZE slot covering the area. phys is
	 * deliberately not advanced: every PTE uses the large page size,
	 * so the low bits of the physical address must stay clear
	 */
	for (offset = 0; offset < page_size; offset += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + offset, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* On hash MMUs the vmemmap is bolted straight into the hash
	 * table; a bolt failure here is unrecoverable */
	int rc;

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       PAGE_KERNEL, mmu_vmemmap_psize,
			       mmu_kernel_ssize);
	BUG_ON(rc < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

int __meminit vmemmap_populate(struct page *start_page,
unsigned long nr_pages, int node)
{
Expand All @@ -215,8 +256,11 @@ int __meminit vmemmap_populate(struct page *start_page,
/* Align to the page size of the linear mapping. */
start = _ALIGN_DOWN(start, page_size);

pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
start_page, nr_pages, node);
pr_debug(" -> map %lx..%lx\n", start, end);

for (; start < end; start += page_size) {
int mapped;
void *p;

if (vmemmap_populated(start, page_size))
Expand All @@ -226,13 +270,10 @@ int __meminit vmemmap_populate(struct page *start_page,
if (!p)
return -ENOMEM;

pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
start, p, __pa(p));
pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p);

mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
pgprot_val(PAGE_KERNEL),
mmu_vmemmap_psize, mmu_kernel_ssize);
BUG_ON(mapped < 0);
vmemmap_create_mapping(start, page_size, __pa(p));
}

return 0;
Expand Down
7 changes: 6 additions & 1 deletion trunk/arch/powerpc/mm/mmu_decl.h
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,12 @@ extern unsigned int rtas_data, rtas_size;
struct hash_pte;
extern struct hash_pte *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
#endif

#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
extern int map_kernel_page(unsigned long ea, unsigned long pa, int flags);
#endif /* CONFIG_PPC64 */

extern unsigned long ioremap_bot;
extern unsigned long __max_low_memory;
Expand Down
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/mm/pgtable_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ static void *early_alloc_pgtable(unsigned long size)
* map_kernel_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
static int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
pgd_t *pgdp;
pud_t *pudp;
Expand Down
11 changes: 10 additions & 1 deletion trunk/arch/powerpc/mm/tlb_nohash.c
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ static inline int mmu_get_tsize(int psize)

int mmu_linear_psize; /* Page size used for the linear mapping */
int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_enabled; /* Is HW tablewalk enabled ? */
unsigned long linear_map_top; /* Top of linear mapping */

Expand Down Expand Up @@ -356,10 +357,18 @@ static void __early_init_mmu(int boot_cpu)
unsigned int mas4;

/* XXX This will have to be decided at runtime, but right
* now our boot and TLB miss code hard wires it
* now our boot and TLB miss code hard wires it. Ideally
* we should find out a suitable page size and patch the
* TLB miss code (either that or use the PACA to store
* the value we want)
*/
mmu_linear_psize = MMU_PAGE_1G;

/* XXX This should be decided at runtime based on supported
* page sizes in the TLB, but for now let's assume 16M is
* always there and a good fit (which it probably is)
*/
mmu_vmemmap_psize = MMU_PAGE_16M;

/* Check if HW tablewalk is present, and if yes, enable it by:
*
Expand Down

0 comments on commit 1ac6c68

Please sign in to comment.