Commit 0bfc538
---
yaml
---
r: 96821
b: refs/heads/master
c: cec08e7
h: refs/heads/master
i:
  96819: 4c3fc0a
v: v3
Benjamin Herrenschmidt authored and Paul Mackerras committed May 15, 2008
1 parent af0cc7a commit 0bfc538
Showing 7 changed files with 66 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 08fcf1d61193d7b7779aa6d7388535e26e064a0b
+refs/heads/master: cec08e7a948326b01555be6311480aa08e637de2
28 changes: 26 additions & 2 deletions trunk/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
}
#endif /* CONFIG_PPC_64K_PAGES */

+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/* We try to use 16M pages for vmemmap if that is supported
+ * and we have at least 1G of RAM at boot
+ */
+if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+lmb_phys_mem_size() >= 0x40000000)
+mmu_vmemmap_psize = MMU_PAGE_16M;
+else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+mmu_vmemmap_psize = MMU_PAGE_64K;
+else
+mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */

printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-"virtual = %d, io = %d\n",
+"virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+", vmemmap = %d"
+#endif
+"\n",
mmu_psize_defs[mmu_linear_psize].shift,
mmu_psize_defs[mmu_virtual_psize].shift,
-mmu_psize_defs[mmu_io_psize].shift);
+mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+);

#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
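
The page-size choice above is self-contained enough to restate on its own. Below is a minimal C sketch of the same decision chain, with mmu_psize_defs[] and lmb_phys_mem_size() replaced by illustrative stand-ins; the names and values here are not the kernel's own.

    #include <stdio.h>

    /* Stand-in for the kernel's mmu_psize_defs[] table; a shift of 0 means
     * the page size is not supported by the MMU. Values are illustrative. */
    enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };
    static unsigned int psize_shift[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K]  = 12,
        [MMU_PAGE_64K] = 16,
        [MMU_PAGE_16M] = 24,
    };

    /* Stand-in for lmb_phys_mem_size(): pretend we booted with 2G of RAM. */
    static unsigned long long phys_mem_size(void) { return 2ULL << 30; }

    /* Same policy as the hunk above: 16M pages if supported and we have at
     * least 1G of RAM, otherwise 64K if supported, otherwise 4K. */
    static int pick_vmemmap_psize(void)
    {
        if (psize_shift[MMU_PAGE_16M] && phys_mem_size() >= 0x40000000)
            return MMU_PAGE_16M;
        if (psize_shift[MMU_PAGE_64K])
            return MMU_PAGE_64K;
        return MMU_PAGE_4K;
    }

    int main(void)
    {
        printf("vmemmap page shift = %u\n", psize_shift[pick_vmemmap_psize()]);
        return 0;
    }

With the stand-in values above (16M supported, 2G of RAM) this prints a shift of 24, i.e. 16M vmemmap pages.
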
10 changes: 6 additions & 4 deletions trunk/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
*
*/

+#undef DEBUG
+
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -208,12 +210,12 @@ int __meminit vmemmap_populated(unsigned long start, int page_size)
}

int __meminit vmemmap_populate(struct page *start_page,
-unsigned long nr_pages, int node)
+unsigned long nr_pages, int node)
{
unsigned long mode_rw;
unsigned long start = (unsigned long)start_page;
unsigned long end = (unsigned long)(start_page + nr_pages);
-unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
start, p, __pa(p));

mapped = htab_bolt_mapping(start, start + page_size,
-__pa(p), mode_rw, mmu_linear_psize,
+__pa(p), mode_rw, mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(mapped < 0);
}

return 0;
}
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
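
The switch from mmu_linear_psize to mmu_vmemmap_psize above changes the step size of the bolting loop. A schematic sketch of that loop follows; it is not the kernel function: bolt_mapping() is a printf stand-in for htab_bolt_mapping(), and the 16M page size, the 0xf... base (which assumes a REGION_SHIFT of 60) and the 64-byte struct page are all assumptions for illustration.

    #include <stdio.h>

    #define VMEMMAP_BASE   0xf000000000000000ULL  /* new region, see the pgtable-ppc64.h change below */
    #define VMEMMAP_PSIZE  (1ULL << 24)            /* assume 16M vmemmap pages */

    /* Stand-in for htab_bolt_mapping(): just report what would be bolted. */
    static void bolt_mapping(unsigned long long vstart, unsigned long long vend)
    {
        printf("bolt %#llx..%#llx\n", vstart, vend);
    }

    /* Walk the struct-page range in big steps, one bolted mapping per step. */
    static void populate(unsigned long long start, unsigned long long end)
    {
        start &= ~(VMEMMAP_PSIZE - 1);
        for (; start < end; start += VMEMMAP_PSIZE)
            bolt_mapping(start, start + VMEMMAP_PSIZE);
    }

    int main(void)
    {
        /* e.g. struct pages for 4G of RAM, assuming 64 bytes per struct page */
        unsigned long long nr_pages = (4ULL << 30) / 4096;

        populate(VMEMMAP_BASE, VMEMMAP_BASE + nr_pages * 64);
        return 0;
    }

With these numbers the 64M of struct page entries is covered by just four bolted 16M mappings.
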
16 changes: 14 additions & 2 deletions trunk/arch/powerpc/mm/slb.c
@@ -28,7 +28,7 @@
#include <asm/udbg.h>

#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
#else
#define DBG pr_debug
#endif
@@ -263,13 +263,19 @@ void slb_initialize(void)
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+extern unsigned int *slb_miss_kernel_load_vmemmap;
+unsigned long vmemmap_llp;
+#endif

/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
io_llp = mmu_psize_defs[mmu_io_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -281,6 +287,12 @@ void slb_initialize(void)

DBG("SLB: linear LLP = %04lx\n", linear_llp);
DBG("SLB: io LLP = %04lx\n", io_llp);

+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+SLB_VSID_KERNEL | vmemmap_llp);
+DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
}

get_paca()->stab_rr = SLB_NUM_BOLTED;
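
The patch_slb_encoding() call added above rewrites the placeholder li r11,0 (added to slb_low.S below) with the real SLB encoding once the page sizes are known. A rough model of the idea, assuming only that the low 16 bits of the li/cmpldi immediate are overwritten; the instruction word and the patched value are illustrative, and the real code also has to flush the icache afterwards.

    #include <stdio.h>

    /* Overwrite the 16-bit immediate field of an li (addi) instruction word. */
    static void patch_slb_encoding(unsigned int *insn, unsigned int patch)
    {
        *insn = (*insn & 0xffff0000) | (patch & 0xffff);
    }

    int main(void)
    {
        unsigned int insn = 0x39600000;       /* "li r11,0", i.e. addi r11,0,0 */
        unsigned int vsid_flags = 0x0490;     /* hypothetical SLB_VSID_KERNEL | vmemmap_llp */

        patch_slb_encoding(&insn, vsid_flags);
        printf("patched: %#010x (li r11,%#x)\n", insn, insn & 0xffff);
        return 0;
    }
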
16 changes: 13 additions & 3 deletions trunk/arch/powerpc/mm/slb_low.S
@@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
* it to VSID 0, which is reserved as a bad VSID - one which
* will never have any pages in it. */

-/* Check if hitting the linear mapping of the vmalloc/ioremap
-* kernel space
+/* Check if hitting the linear mapping or some other kernel space
*/
bne cr7,1f

@@ -62,7 +61,18 @@
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
b slb_finish_load_1T

-1: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/* Check virtual memmap region. To be patched at kernel boot */
+cmpldi cr0,r9,0xf
+bne 1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+li r11,0
+b 6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
* will be patched by the kernel at boot
*/
BEGIN_FTR_SECTION
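
The new cmpldi cr0,r9,0xf added above keys off the region ID, the top four bits of the effective address. Rendered in C, the dispatch the SLB miss handler now performs for kernel addresses looks roughly like the sketch below; it assumes the usual ppc64 layout (linear mapping in region 0xc, vmalloc/ioremap in 0xd) with vmemmap in region 0xf as defined later in this commit, and it omits the finer vmalloc-versus-ioremap split inside region 0xd.

    #include <stdio.h>

    /* Classify a kernel effective address by its region ID (ea >> 60). */
    static const char *slb_kernel_class(unsigned long long ea)
    {
        switch (ea >> 60) {             /* region ID = top four address bits */
        case 0xc: return "linear";      /* bolted linear mapping of RAM */
        case 0xf: return "vmemmap";     /* new: virtual memmap region */
        default:  return "vmalloc/io";  /* rest of kernel space */
        }
    }

    int main(void)
    {
        printf("%s %s %s\n",
               slb_kernel_class(0xc000000000000000ULL),
               slb_kernel_class(0xd000000000000000ULL),
               slb_kernel_class(0xf000000000000000ULL));
        return 0;
    }
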
1 change: 1 addition & 0 deletions trunk/include/asm-powerpc/mmu-hash64.h
@@ -177,6 +177,7 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
+extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
10 changes: 5 additions & 5 deletions trunk/include/asm-powerpc/pgtable-ppc64.h
@@ -65,15 +65,15 @@

#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID (0xfUL)
#define USER_REGION_ID (0UL)

/*
-* Defines the address of the vmemap area, in the top 16th of the
-* kernel region.
+* Defines the address of the vmemap area, in its own region
*/
-#define VMEMMAP_BASE (ASM_CONST(CONFIG_KERNEL_START) + \
-(0xfUL << (REGION_SHIFT - 4)))
-#define vmemmap ((struct page *)VMEMMAP_BASE)
+#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
+#define vmemmap ((struct page *)VMEMMAP_BASE)


/*
* Common bits in a linux-style PTE. These match the bits in the
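
The effect of the VMEMMAP_BASE change is easiest to see numerically. Assuming the usual ppc64 values REGION_SHIFT = 60 and CONFIG_KERNEL_START = 0xc000000000000000, the old base landed in the top sixteenth of the kernel region (0xc) while the new one gets region 0xf to itself:

    #include <stdio.h>

    #define REGION_SHIFT  60
    #define KERNEL_START  0xc000000000000000ULL  /* assumed CONFIG_KERNEL_START */

    int main(void)
    {
        unsigned long long old_base = KERNEL_START + (0xfULL << (REGION_SHIFT - 4));
        unsigned long long new_base = 0xfULL << REGION_SHIFT;

        printf("old VMEMMAP_BASE = %#llx (region %#llx)\n", old_base, old_base >> REGION_SHIFT);
        printf("new VMEMMAP_BASE = %#llx (region %#llx)\n", new_base, new_base >> REGION_SHIFT);
        return 0;
    }

That separation is what lets the SLB miss handler in slb_low.S recognise vmemmap addresses with a single compare against 0xf and give them their own segment encoding.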
