Commit

---
r: 26161
b: refs/heads/master
c: 2fd8303
h: refs/heads/master
i:
  26159: 8c573ec
v: v3
Helge Deller authored and Kyle McMartin committed Apr 21, 2006
1 parent 7448f9d commit 19e068b
Showing 12 changed files with 199 additions and 103 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: d668da80d613def981c573354e1853e38bd0698d
refs/heads/master: 2fd83038160531245099c3c5b3511fa4b80765eb
31 changes: 31 additions & 0 deletions trunk/arch/parisc/Kconfig
@@ -138,6 +138,37 @@ config 64BIT
enable this option otherwise. The 64bit kernel is significantly bigger
and slower than the 32bit one.

choice
prompt "Kernel page size"
default PARISC_PAGE_SIZE_4KB if !64BIT
default PARISC_PAGE_SIZE_4KB if 64BIT
# default PARISC_PAGE_SIZE_16KB if 64BIT

config PARISC_PAGE_SIZE_4KB
bool "4KB"
help
This lets you select the page size of the kernel. For best
performance, a page size of 16KB is recommended. For best
compatibility with 32bit applications, a page size of 4KB should be
selected (the vast majority of 32bit binaries work perfectly fine
with a larger page size).

4KB For best 32bit compatibility
16KB For best performance
64KB For best performance, might give more overhead.

If you don't know what to do, choose 4KB.

config PARISC_PAGE_SIZE_16KB
bool "16KB (EXPERIMENTAL)"
depends on PA8X00 && EXPERIMENTAL

config PARISC_PAGE_SIZE_64KB
bool "64KB (EXPERIMENTAL)"
depends on PA8X00 && EXPERIMENTAL

endchoice

config SMP
bool "Symmetric multi-processing support"
---help---
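The help text above is the only prose in the commit explaining the new page-size choice; the include/asm-parisc/page.h hunk that consumes these options is not among the loaded diffs. A sketch of how such options are conventionally turned into PAGE_SHIFT/PAGE_SIZE, under that assumption (the CONFIG_* names come from the Kconfig entries above, everything else is illustrative):

/* Sketch only: how the new CONFIG_PARISC_PAGE_SIZE_* options would
 * typically be consumed (the real include/asm-parisc/page.h hunk is not
 * shown on this page). */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define PAGE_SHIFT  12     /*  4 KB */
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define PAGE_SHIFT  14     /* 16 KB */
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define PAGE_SHIFT  16     /* 64 KB */
#else
# error "no page size configured"
#endif
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE - 1))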
3 changes: 3 additions & 0 deletions trunk/arch/parisc/kernel/asm-offsets.c
@@ -288,8 +288,11 @@ int main(void)
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64);
DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
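The two new ASM_PAGE_SIZE_DIV* constants feed the hand-unrolled copy and clear loops in pacache.S further down: the 64-bit paths move 128 bytes per iteration and the 32-bit paths 64 bytes, and until now the iteration counts were hard-coded for 4 KB pages ("ldi 32" / "ldi 64"). A quick check of the resulting loop counts (a standalone sketch, not part of the commit):

#include <stdio.h>

/* Iteration counts for the unrolled pacache.S loops at different page
 * sizes: 128-byte steps in the 64-bit copy/clear paths, 64-byte steps in
 * the 32-bit ones.  With 4 KB pages these reproduce the old literals 32
 * and 64 that the diff below replaces. */
int main(void)
{
        unsigned long page_sizes[] = { 4096UL, 16384UL, 65536UL };

        for (unsigned int i = 0; i < 3; i++)
                printf("%6lu-byte pages: ASM_PAGE_SIZE_DIV128 = %4lu, "
                       "ASM_PAGE_SIZE_DIV64 = %4lu\n",
                       page_sizes[i], page_sizes[i] / 128, page_sizes[i] / 64);
        return 0;
}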
36 changes: 22 additions & 14 deletions trunk/arch/parisc/kernel/entry.S
@@ -502,18 +502,20 @@
* all ILP32 processes and all the kernel for machines with
* under 4GB of memory) */
.macro L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
copy %r0,\pte
extrd,u,*= \va,31,32,%r0
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldw,s \index(\pgd),\pgd
extrd,u,*= \va,31,32,%r0
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
extrd,u,*= \va,31,32,%r0
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
shld \pgd,PxD_VALUE_SHIFT,\index
extrd,u,*= \va,31,32,%r0
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
copy \index,\pgd
extrd,u,*<> \va,31,32,%r0
extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
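The "#if PT_NLEVELS == 3" guard above captures that the number of page-table levels now follows from the configured page size rather than from CONFIG_64BIT alone, as the 16kb remark in the comment hints. A back-of-the-envelope calculation of why that is, assuming each level is a single page of 8-byte entries (a simplification for illustration; the real PGD order can differ):

#include <stdio.h>

/* Sketch: virtual-address bits covered by an n-level page table when every
 * level is a single page of 8-byte entries (a simplifying assumption, not
 * the exact parisc layout). */
static unsigned int va_bits(unsigned int page_shift, unsigned int levels)
{
        unsigned int bits_per_level = page_shift - 3;   /* log2(PAGE_SIZE / 8) */
        return page_shift + levels * bits_per_level;
}

int main(void)
{
        printf("4KB pages,  3 levels: %u VA bits\n", va_bits(12, 3)); /* 39 */
        printf("16KB pages, 2 levels: %u VA bits\n", va_bits(14, 2)); /* 36 */
        /* With 16 KB or larger pages, two levels already span a usefully
         * large address space, which is the 2-level scheme the comment in
         * L3_ptep alludes to. */
        return 0;
}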

@@ -563,10 +565,18 @@
extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */

/* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
/* Enforce uncacheable pages.
* This should ONLY be use for MMIO on PA 2.0 machines.
* Memory/DMA is cache coherent on all PA2.0 machines we support
* (that means T-class is NOT supported) and the memory controllers
* on most of those machines only handles cache transactions.
*/
extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
depi 1,12,1,\prot

depd %r0,63,PAGE_SHIFT,\pte
extrd,s \pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
.endm

/* Identical macro to make_insert_tlb above, except it
@@ -584,9 +594,8 @@

/* Get rid of prot bits and convert to page addr for iitlba */

depi 0,31,PAGE_SHIFT,\pte
depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
extru \pte,24,25,\pte

.endm
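Both make_insert_tlb variants now start from a PTE whose page frame number sits at PFN_PTE_SHIFT (exported to assembly as ASM_PFN_PTE_SHIFT) instead of a raw physical page address, strip the protection bits, and deposit the default page-size encoding for iitlbt/idtlbt. In very rough C terms the intent is something like the following; the shift amounts and encoding value are placeholders, the real ones live in the extrd/depdi operands above:

/* Sketch only -- placeholder values, the real ones come from the parisc
 * headers and from the extrd/depdi operands in the macros above. */
#define PFN_PTE_SHIFT                12     /* assumed: PFN position inside a PTE       */
#define TLB_PPN_SHIFT                 5     /* assumed: PPN position in the TLB operand */
#define _PAGE_SIZE_ENCODING_DEFAULT 0UL     /* assumed: encoding for the default size   */

static unsigned long tlb_insert_operand(unsigned long pte)
{
        unsigned long pfn = pte >> PFN_PTE_SHIFT;    /* drop the protection bits       */
        unsigned long op  = pfn << TLB_PPN_SHIFT;    /* reposition for iitlbt/idtlbt   */
        return op | _PAGE_SIZE_ENCODING_DEFAULT;     /* page-size encoding in low bits */
}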

/* This is for ILP32 PA2.0 only. The TLB insertion needs
@@ -1201,10 +1210,9 @@ intr_save:
*/

/* adjust isr/ior. */

extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
depd %r1,31,7,%r17 /* deposit them into ior */
depdi 0,63,7,%r16 /* clear them from isr */
extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
#endif
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
15 changes: 8 additions & 7 deletions trunk/arch/parisc/kernel/head.S
@@ -76,16 +76,16 @@ $bss_loop:
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */

#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
/* 2-level page table, so pmd == pgd */
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

/* Fill in pmd with enough pte directories */
@@ -99,21 +99,22 @@ $bss_loop:
stw %r3,0(%r4)
ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


/* Now initialize the PTEs themselves */
ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
ldo 0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
load32 PA(pg0),%r1

$pgt_fill_loop:
STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
ldo ASM_PAGE_SIZE(%r3),%r3
bb,>= %r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
addib,> -1,%r11,$pgt_fill_loop
nop

/* Load the return address...er...crash 'n burn */
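With PTEs now carrying a page frame number, the new $pgt_fill_loop advances by one PFN per entry ("add one PFN") and counts PFNs down in %r11 instead of testing an address bit against KERNEL_INITIAL_ORDER. A rough C restatement of that loop, with every constant treated as an assumption (a sketch, not code from the commit):

/* Rough C restatement of the new $pgt_fill_loop (a sketch; every constant
 * below is an assumption): fill pg0[] with PFN-format PTEs covering
 * physical memory from address 0 up to (1 << KERNEL_INITIAL_ORDER) bytes. */
#define PAGE_SHIFT            12            /* assumed 4 KB configuration     */
#define PFN_PTE_SHIFT         12            /* assumed PFN position in a PTE  */
#define KERNEL_INITIAL_ORDER  24            /* assumed 16 MB initial mapping  */
#define _PAGE_KERNEL          0x1ffUL       /* assumed kernel protection bits */

static unsigned long pg0[1UL << (KERNEL_INITIAL_ORDER - PAGE_SHIFT)];

static void fill_initial_ptes(void)
{
        unsigned long pte = _PAGE_KERNEL;   /* PFN 0 plus kernel protections  */
        unsigned long n   = 1UL << (KERNEL_INITIAL_ORDER - PAGE_SHIFT);

        for (unsigned long i = 0; i < n; i++) {
                pg0[i] = pte;
                pte += 1UL << PFN_PTE_SHIFT;        /* "add one PFN"          */
        }
}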
10 changes: 5 additions & 5 deletions trunk/arch/parisc/kernel/init_task.c
@@ -53,17 +53,17 @@ union thread_union init_thread_union
__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

#ifdef __LP64__
#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };

pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte"))) = { {0}, };

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));

/*
* Initial task structure.
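The NOTE above explains why pmd0, swapper_pg_dir and pg0 are pinned into dedicated .data.vm0.* sections: the hybrid L2/L3 scheme expects the boot-time pmd to sit immediately below the pgd, which is what lets the L3_ptep macro in entry.S reach it through the constant ASM_PGD_PMD_OFFSET, and gcc does not guarantee any particular layout for ordinary globals, so the linker script orders the sections instead. A minimal sketch of that access pattern, assuming the offset is simply one page downwards:

/* Sketch only -- values are assumptions, not taken from the diff. */
#define PAGE_SIZE           (1UL << 12)          /* assumed 4 KB build             */
#define ASM_PGD_PMD_OFFSET  (-(long)PAGE_SIZE)   /* assumed: pmd is the page below */

static unsigned long *kernel_pmd_of(unsigned long *pgd)
{
        /* mirrors "ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd" in the L3_ptep macro */
        return (unsigned long *)((char *)pgd + ASM_PGD_PMD_OFFSET);
}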
25 changes: 12 additions & 13 deletions trunk/arch/parisc/kernel/pacache.S
@@ -65,7 +65,7 @@ flush_tlb_all_local:
*/

/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
rsm PSW_SM_I, %r19 /* save I-bit state */
rsm PSW_SM_I, %r19 /* save I-bit state */
load32 PA(1f), %r1
nop
nop
@@ -84,8 +84,7 @@ flush_tlb_all_local:
rfi
nop

1: ldil L%PA(cache_info), %r1
ldo R%PA(cache_info)(%r1), %r1
1: load32 PA(cache_info), %r1

/* Flush Instruction Tlb */

@@ -212,8 +211,7 @@ flush_instruction_cache_local:
.entry

mtsp %r0, %sr1
ldil L%cache_info, %r1
ldo R%cache_info(%r1), %r1
load32 cache_info, %r1

/* Flush Instruction Cache */

@@ -254,8 +252,7 @@ flush_data_cache_local:
.entry

mtsp %r0, %sr1
ldil L%cache_info, %r1
ldo R%cache_info(%r1), %r1
load32 cache_info, %r1

/* Flush Data Cache */

@@ -303,7 +300,8 @@ copy_user_page_asm:
*/

ldd 0(%r25), %r19
ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
ldi ASM_PAGE_SIZE_DIV128, %r1

ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
ldw 128(%r25), %r0 /* prefetch 2 */

@@ -368,7 +366,7 @@ copy_user_page_asm:
* use ldd/std on a 32 bit kernel.
*/
ldw 0(%r25), %r19
ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
ldi ASM_PAGE_SIZE_DIV64, %r1

1:
ldw 4(%r25), %r20
@@ -461,6 +459,7 @@ copy_user_page_asm:
sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */

ldil L%(TMPALIAS_MAP_START), %r28
/* FIXME for different page sizes != 4k */
#ifdef CONFIG_64BIT
extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
@@ -551,6 +550,7 @@ __clear_user_page_asm:
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
/* FIXME: page size dependend */
#endif
extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
@@ -566,10 +566,10 @@ __clear_user_page_asm:
pdtlb 0(%r28)

#ifdef CONFIG_64BIT
ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
ldi ASM_PAGE_SIZE_DIV128, %r1

/* PREFETCH (Write) has not (yet) been proven to help here */
/* #define PREFETCHW_OP ldd 256(%0), %r0 */
/* #define PREFETCHW_OP ldd 256(%0), %r0 */

1: std %r0, 0(%r28)
std %r0, 8(%r28)
@@ -591,8 +591,7 @@ __clear_user_page_asm:
ldo 128(%r28), %r28

#else /* ! CONFIG_64BIT */

ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
ldi ASM_PAGE_SIZE_DIV64, %r1

1:
stw %r0, 0(%r28)
10 changes: 5 additions & 5 deletions trunk/arch/parisc/kernel/syscall.S
@@ -55,7 +55,7 @@
* pointers.
*/

.align 4096
.align ASM_PAGE_SIZE
linux_gateway_page:

/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
@@ -632,7 +632,7 @@ cas_action:
end_compare_and_swap:

/* Make sure nothing else is placed on this page */
.align 4096
.align ASM_PAGE_SIZE
.export end_linux_gateway_page
end_linux_gateway_page:

@@ -652,7 +652,7 @@ end_linux_gateway_page:

.section .rodata,"a"

.align 4096
.align ASM_PAGE_SIZE
/* Light-weight-syscall table */
/* Start of lws table. */
.export lws_table
@@ -662,14 +662,14 @@ lws_table:
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
/* End of lws table */

.align 4096
.align ASM_PAGE_SIZE
.export sys_call_table
.Lsys_call_table:
sys_call_table:
#include "syscall_table.S"

#ifdef CONFIG_64BIT
.align 4096
.align ASM_PAGE_SIZE
.export sys_call_table64
.Lsys_call_table64:
sys_call_table64:
(Diffs for the remaining files in this commit were not loaded.)