From 03c1e883aba3c150eff369d1c2fba0970e1e2927 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 17 Nov 2009 21:05:31 +0000
Subject: [PATCH]

--- yaml ---
r: 181095
b: refs/heads/master
c: 8eda55142080f0373b1f0268fe6d6807f193e713
h: refs/heads/master
i:
  181093: 5b2362acce57c0394e1f13cf7a2ea1e7fe192a83
  181091: 34b557dbd172172e3213d4140672363c44c44d05
  181087: ccde26564abc4c905f1ceeebe8a250245ae2956a
v: v3
---
 [refs]                                        |   2 +-
 trunk/arch/sh/include/asm/pgtable_32.h        |   4 +
 trunk/arch/sh/include/asm/tlb.h               |  16 ++
 .../arch/sh/include/cpu-sh4/cpu/mmu_context.h |   4 +
 trunk/arch/sh/kernel/cpu/fpu.c                |   2 -
 trunk/arch/sh/kernel/head_32.S                | 241 ------------------
 trunk/arch/sh/mm/pmb.c                        | 156 ++++--------
 trunk/arch/sh/mm/tlb-pteaex.c                 |  66 +++++
 trunk/arch/sh/mm/tlb-sh4.c                    |  66 +++++
 9 files changed, 208 insertions(+), 349 deletions(-)

diff --git a/[refs] b/[refs]
index f922bc7c949e..ab75ec86515a 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4291b730cd0f0cf98a90d946b6cabbd804397350
+refs/heads/master: 8eda55142080f0373b1f0268fe6d6807f193e713
diff --git a/trunk/arch/sh/include/asm/pgtable_32.h b/trunk/arch/sh/include/asm/pgtable_32.h
index 5003ee86f67b..c573d45f1286 100644
--- a/trunk/arch/sh/include/asm/pgtable_32.h
+++ b/trunk/arch/sh/include/asm/pgtable_32.h
@@ -71,6 +71,8 @@
 #define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
 #define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */

+#define _PAGE_EXT_WIRED	0x4000	/* software: Wire TLB entry */
+
 /* Wrapper for extended mode pgprot twiddling */
 #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)

@@ -164,6 +166,8 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
 	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
 	 _PAGE_DIRTY | _PAGE_SPECIAL)

+#define _PAGE_WIRED	(_PAGE_EXT(_PAGE_EXT_WIRED))
+
 #ifndef __ASSEMBLY__

 #if defined(CONFIG_X2TLB)	/* SH-X2 TLB */
diff --git a/trunk/arch/sh/include/asm/tlb.h b/trunk/arch/sh/include/asm/tlb.h
index da8fe7ab8728..3ed2f7a05416 100644
--- a/trunk/arch/sh/include/asm/tlb.h
+++ b/trunk/arch/sh/include/asm/tlb.h
@@ -97,6 +97,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)

 #define tlb_migrate_finish(mm) do { } while (0)

+#ifdef CONFIG_CPU_SH4
+extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
+extern void tlb_unwire_entry(void);
+#else
+static inline void tlb_wire_entry(struct vm_area_struct *vma ,
+				  unsigned long addr, pte_t pte)
+{
+	BUG();
+}
+
+static inline void tlb_unwire_entry(void)
+{
+	BUG();
+}
+#endif /* CONFIG_CPU_SH4 */
+
 #else /* CONFIG_MMU */

 #define tlb_start_vma(tlb, vma)				do { } while (0)
diff --git a/trunk/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/trunk/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 3ce7ef6c2978..03ea75c5315d 100644
--- a/trunk/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/trunk/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -25,6 +25,10 @@

 #define MMUCR_TI		(1<<2)

+#define MMUCR_URB		0x00FC0000
+#define MMUCR_URB_SHIFT		18
+#define MMUCR_URB_NENTRIES	64
+
 #if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
 #define MMUCR_SE		(1 << 4)
 #else
diff --git a/trunk/arch/sh/kernel/cpu/fpu.c b/trunk/arch/sh/kernel/cpu/fpu.c
index f059ed62cf57..c23e6727002a 100644
--- a/trunk/arch/sh/kernel/cpu/fpu.c
+++ b/trunk/arch/sh/kernel/cpu/fpu.c
@@ -56,7 +56,6 @@ void fpu_state_restore(struct pt_regs *regs)
 	}

 	if (!tsk_used_math(tsk)) {
-		local_irq_enable();
 		/*
 		 * does a slab alloc which can sleep
 		 */
@@ -67,7 +66,6 @@
 			do_group_exit(SIGKILL);
 			return;
 		}
-		local_irq_disable();
 	}

 	grab_fpu(regs);
diff --git a/trunk/arch/sh/kernel/head_32.S b/trunk/arch/sh/kernel/head_32.S
index 8ee31a0b973e..e5d421db4c83 100644
--- a/trunk/arch/sh/kernel/head_32.S
+++ b/trunk/arch/sh/kernel/head_32.S
@@ -3,7 +3,6 @@
  * arch/sh/kernel/head.S
  *
  * Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
- * Copyright (C) 2010  Matt Fleming
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -85,236 +84,6 @@ ENTRY(_stext)
 	ldc	r0, r7_bank	! ... and initial thread_info
 #endif

-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
-	/*
-	 * Reconfigure the initial PMB mappings setup by the hardware.
-	 *
-	 * When we boot in 32-bit MMU mode there are 2 PMB entries already
-	 * setup for us.
-	 *
-	 * Entry	VPN	   PPN		V	SZ	C	UB	WT
-	 * ---------------------------------------------------------------
-	 *   0		0x80000000 0x00000000	1	512MB	1	0	1
-	 *   1		0xA0000000 0x00000000	1	512MB	0	0	0
-	 *
-	 * But we reprogram them here because we want complete control over
-	 * our address space and the initial mappings may not map PAGE_OFFSET
-	 * to __MEMORY_START (or even map all of our RAM).
-	 *
-	 * Once we've setup cached and uncached mappings for all of RAM we
-	 * clear the rest of the PMB entries.
-	 *
-	 * This clearing also deals with the fact that PMB entries can persist
-	 * across reboots. The PMB could have been left in any state when the
-	 * reboot occurred, so to be safe we clear all entries and start with
-	 * with a clean slate.
-	 */
-
-	mov.l	.LMMUCR, r1	/* Flush the TLB */
-	mov.l	@r1, r0
-	or	#MMUCR_TI, r0
-	mov.l	r0, @r1
-
-	mov.l	.LMEMORY_SIZE, r5
-	mov	r5, r7
-
-	mov	#PMB_E_SHIFT, r0
-	mov	#0x1, r4
-	shld	r0, r4
-
-	mov.l	.LFIRST_DATA_ENTRY, r0
-	mov.l	.LPMB_DATA, r1
-	mov.l	.LFIRST_ADDR_ENTRY, r2
-	mov.l	.LPMB_ADDR, r3
-
-	mov	#0, r10
-
-	/*
-	 * r0 = PMB_DATA data field
-	 * r1 = PMB_DATA address field
-	 * r2 = PMB_ADDR data field
-	 * r3 = PMB_ADDR address field
-	 * r4 = PMB_E_SHIFT
-	 * r5 = remaining amount of RAM to map
-	 * r6 = PMB mapping size we're trying to use
-	 * r7 = cached_to_uncached
-	 * r8 = scratch register
-	 * r9 = scratch register
-	 * r10 = number of PMB entries we've setup
-	 */
-.L512:
-	mov	#(512 >> 4), r6
-	shll16	r6
-	shll8	r6
-
-	cmp/hi	r5, r6
-	bt	.L128
-
-	mov	#(PMB_SZ_512M >> 2), r9
-	shll2	r9
-
-	/*
-	 * Cached mapping
-	 */
-	mov	#PMB_C, r8
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov.l	r2, @r3
-
-	add	r4, r1		/* Increment to the next PMB_DATA entry */
-	add	r4, r3		/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10		/* Increment number of PMB entries */
-
-	/*
-	 * Uncached mapping
-	 */
-	mov	#(PMB_UB >> 8), r8
-	shll8	r8
-
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov	r2, r8
-	add	r7, r8
-	mov.l	r8, @r3
-
-	add	r4, r1		/* Increment to the next PMB_DATA entry */
-	add	r4, r3		/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10		/* Increment number of PMB entries */
-
-	sub	r6, r5
-	add	r6, r0
-	add	r6, r2
-
-	bra	.L512
-
-.L128:
-	mov	#(128 >> 4), r6
-	shll16	r6
-	shll8	r6
-
-	cmp/hi	r5, r6
-	bt	.L64
-
-	mov	#(PMB_SZ_128M >> 2), r9
-	shll2	r9
-
-	/*
-	 * Cached mapping
-	 */
-	mov	#PMB_C, r8
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov.l	r2, @r3
-
-	add	r4, r1		/* Increment to the next PMB_DATA entry */
-	add	r4, r3		/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10		/* Increment number of PMB entries */
-
-	/*
-	 * Uncached mapping
-	 */
-	mov	#(PMB_UB >> 8), r8
-	shll8	r8
-
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov	r2, r8
-	add	r7, r8
-	mov.l	r8, @r3
-
-	add	r4, r1		/* Increment to the next PMB_DATA entry */
-	add	r4, r3		/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10		/* Increment number of PMB entries */
-
-	sub	r6, r5
-	add	r6, r0
-	add	r6, r2
-
-	bra	.L128
-
-.L64:
-	mov	#(64 >> 4), r6
-	shll16	r6
-	shll8	r6
-
-	cmp/hi	r5, r6
-	bt	.Ldone
-
-	mov	#(PMB_SZ_64M >> 2), r9
-	shll2	r9
-
-	/*
-	 * Cached mapping
-	 */
-	mov	#PMB_C, r8
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov.l	r2, @r3
-
-	add	r4, r1		/* Increment to the next PMB_DATA entry */
-	add	r4, r3		/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10		/* Increment number of PMB entries */
-
-	/*
-	 * Uncached mapping
-	 */
-	mov	#(PMB_UB >> 8), r8
-	shll8	r8
-
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov	r2, r8
-	add	r7, r8
-	mov.l	r8, @r3
-
-	add	r4, r1		/* Increment to the next PMB_DATA entry */
-	add	r4, r3		/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10		/* Increment number of PMB entries */
-
-	sub	r6, r5
-	add	r6, r0
-	add	r6, r2
-
-	bra	.L64
-
-.Ldone:
-	/* Update cached_to_uncached */
-	mov.l	.Lcached_to_uncached, r0
-	mov.l	r7, @r0
-
-	/*
-	 * Clear the remaining PMB entries.
-	 *
-	 * r3 = entry to begin clearing from
-	 * r10 = number of entries we've setup so far
-	 */
-	mov	#0, r1
-	mov	#PMB_ENTRY_MAX, r0
-
-.Lagain:
-	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
-	add	#1, r10		/* Increment the loop counter */
-	cmp/eq	r0, r10
-	bf/s	.Lagain
-	add	r4, r3		/* Increment to the next PMB_ADDR entry */
-
-	mov.l	6f, r0
-	icbi	@r0
-
-#endif /* !CONFIG_PMB_LEGACY */
-
 #ifndef CONFIG_SH_NO_BSS_INIT
 	/*
	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -364,13 +133,3 @@ ENTRY(stack_start)
 5:	.long	start_kernel
 6:	.long	sh_cpu_init
 7:	.long	init_thread_union
-
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
-.LPMB_ADDR:		.long	PMB_ADDR
-.LPMB_DATA:		.long	PMB_DATA
-.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
-.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
-.LMMUCR:		.long	MMUCR
-.Lcached_to_uncached:	.long	cached_to_uncached
-.LMEMORY_SIZE:		.long	__MEMORY_SIZE
-#endif
diff --git a/trunk/arch/sh/mm/pmb.c b/trunk/arch/sh/mm/pmb.c
index b796b6c021b4..8f7dbf183fb0 100644
--- a/trunk/arch/sh/mm/pmb.c
+++ b/trunk/arch/sh/mm/pmb.c
@@ -3,8 +3,11 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005 - 2010  Paul Mundt
- * Copyright (C) 2010  Matt Fleming
+ * Copyright (C) 2005 - 2010 Paul Mundt
+ *
+ * P1/P2 Section mapping definitions from map32.h, which was:
+ *
+ *	Copyright 2003 (c) Lineo Solutions,Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -277,82 +280,46 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 }

 #ifdef CONFIG_PMB_LEGACY
-static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
-	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
-}
-
 static int pmb_apply_legacy_mappings(void)
 {
-	unsigned int applied = 0;
 	int i;
+	unsigned long addr, data;
+	unsigned int applied = 0;

-	pr_info("PMB: Preserving legacy mappings:\n");
-
-	/*
-	 * The following entries are setup by the bootloader.
-	 *
-	 * Entry	VPN	   PPN		V	SZ	C	UB
-	 * --------------------------------------------------------
-	 *   0		0xA0000000 0x00000000	1	64MB	0	0
-	 *   1		0xA4000000 0x04000000	1	16MB	0	0
-	 *   2		0xA6000000 0x08000000	1	16MB	0	0
-	 *   9		0x88000000 0x48000000	1	128MB	1	1
-	 *  10		0x90000000 0x50000000	1	128MB	1	1
-	 *  11		0x98000000 0x58000000	1	128MB	1	1
-	 *  13		0xA8000000 0x48000000	1	128MB	0	0
-	 *  14		0xB0000000 0x50000000	1	128MB	0	0
-	 *  15		0xB8000000 0x58000000	1	128MB	0	0
-	 *
-	 * The only entries the we need are the ones that map the kernel
-	 * at the cached and uncached addresses.
-	 */
 	for (i = 0; i < PMB_ENTRY_MAX; i++) {
-		unsigned long addr, data;
-		unsigned long addr_val, data_val;
-		unsigned long ppn, vpn;
+		struct pmb_entry *pmbe;
+		unsigned long vpn, ppn, flags;

-		addr = mk_pmb_addr(i);
-		data = mk_pmb_data(i);
+		addr = PMB_DATA + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+		if (!(data & PMB_V))
+			continue;

-		addr_val = __raw_readl(addr);
-		data_val = __raw_readl(data);
+		if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data &= ~PMB_WT;
+#else
+			data &= ~(PMB_C | PMB_WT);
+#endif
+		}
+		ctrl_outl(data, addr);

-		/*
-		 * Skip over any bogus entries
-		 */
-		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
-			continue;
+		ppn = data & PMB_PFN_MASK;

-		ppn = data_val & PMB_PFN_MASK;
-		vpn = addr_val & PMB_PFN_MASK;
+		flags = data & (PMB_C | PMB_WT | PMB_UB);
+		flags |= data & PMB_SZ_MASK;

-		/*
-		 * Only preserve in-range mappings.
-		 */
-		if (pmb_ppn_in_range(ppn)) {
-			unsigned int size;
-			char *sz_str = NULL;
-
-			size = data_val & PMB_SZ_MASK;
-
-			sz_str = (size == PMB_SZ_16M)  ? " 16MB":
-				 (size == PMB_SZ_64M)  ? " 64MB":
-				 (size == PMB_SZ_128M) ? "128MB":
-							 "512MB";
-
-			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
-				vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
-				(data_val & PMB_C) ? "" : "un");
-
-			applied++;
-		} else {
-			/*
-			 * Invalidate anything out of bounds.
-			 */
-			__raw_writel(addr_val & ~PMB_V, addr);
-			__raw_writel(data_val & ~PMB_V, data);
-		}
+		addr = PMB_ADDR + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+
+		vpn = data & PMB_PFN_MASK;
+
+		pmbe = pmb_alloc(vpn, ppn, flags, i);
+		WARN_ON(IS_ERR(pmbe));
+
+		applied++;
 	}

 	return (applied == 0);
@@ -366,9 +333,8 @@ static inline int pmb_apply_legacy_mappings(void)

 int __uses_jump_to_uncached pmb_init(void)
 {
-	int i;
-	unsigned long addr, data;
-	unsigned long ret;
+	unsigned int i;
+	unsigned long size, ret;

 	jump_to_uncached();

@@ -386,46 +352,26 @@ int __uses_jump_to_uncached pmb_init(void)
 	}

 	/*
-	 * Sync our software copy of the PMB mappings with those in
-	 * hardware. The mappings in the hardware PMB were either set up
-	 * by the bootloader or very early on by the kernel.
+	 * Insert PMB entries for the P1 and P2 areas so that, after
+	 * we've switched the MMU to 32-bit mode, the semantics of P1
+	 * and P2 are the same as in 29-bit mode, e.g.
+	 *
+	 *	P1 - provides a cached window onto physical memory
+	 *	P2 - provides an uncached window onto physical memory
 	 */
-	for (i = 0; i < PMB_ENTRY_MAX; i++) {
-		struct pmb_entry *pmbe;
-		unsigned long vpn, ppn, flags;
+	size = (unsigned long)__MEMORY_START + __MEMORY_SIZE;

-		addr = PMB_DATA + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
-		if (!(data & PMB_V))
-			continue;
+	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
+	BUG_ON(ret != size);

-		if (data & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-			data |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-			data &= ~PMB_WT;
-#else
-			data &= ~(PMB_C | PMB_WT);
-#endif
-		}
-		ctrl_outl(data, addr);
-
-		ppn = data & PMB_PFN_MASK;
-
-		flags = data & (PMB_C | PMB_WT | PMB_UB);
-		flags |= data & PMB_SZ_MASK;
-
-		addr = PMB_ADDR + (i << PMB_E_SHIFT);
-		data = ctrl_inl(addr);
-
-		vpn = data & PMB_PFN_MASK;
-
-		pmbe = pmb_alloc(vpn, ppn, flags, i);
-		WARN_ON(IS_ERR(pmbe));
-	}
+	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
+	BUG_ON(ret != size);

 	ctrl_outl(0, PMB_IRMCR);

+	/* PMB.SE and UB[7] */
+	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
+
 	/* Flush out the TLB */
 	i = ctrl_inl(MMUCR);
 	i |= MMUCR_TI;
diff --git a/trunk/arch/sh/mm/tlb-pteaex.c b/trunk/arch/sh/mm/tlb-pteaex.c
index 409b7c2b4b9d..88c8bb05e16d 100644
--- a/trunk/arch/sh/mm/tlb-pteaex.c
+++ b/trunk/arch/sh/mm/tlb-pteaex.c
@@ -76,3 +76,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
diff --git a/trunk/arch/sh/mm/tlb-sh4.c b/trunk/arch/sh/mm/tlb-sh4.c
index 8cf550e2570f..4c6234743318 100644
--- a/trunk/arch/sh/mm/tlb-sh4.c
+++ b/trunk/arch/sh/mm/tlb-sh4.c
@@ -81,3 +81,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	ctrl_outl(data, addr);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
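---

A note on the MMUCR.URB arithmetic used by the new tlb_wire_entry()/tlb_unwire_entry() helpers above: on SH-4A the UTLB replacement counter wraps when it reaches URB, so slots at or above the boundary are never picked for automatic replacement. Wiring therefore lowers URB by one (reserving a slot for the entry just loaded with __update_tlb()), and unwiring raises it again, which is why entries must be unwired in the reverse order they were wired, as the comments in the patch note. The stand-alone sketch below mirrors only that bookkeeping; the initial URB value of 63, the model_* names and the assert()-based checks are illustrative assumptions, not kernel code.

#include <assert.h>
#include <stdio.h>

#define MMUCR_URB		0x00FC0000
#define MMUCR_URB_SHIFT		18
#define MMUCR_URB_NENTRIES	64

/* Model of the MMUCR register; URB = 63 is an assumed starting boundary. */
static unsigned long mmucr = 63UL << MMUCR_URB_SHIFT;

/* Mirror of the wiring step: lower the replace boundary by one slot. */
static void model_wire(void)
{
	unsigned long status = mmucr;
	int urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;

	status &= ~MMUCR_URB;
	urb--;
	assert(urb != 0);			/* never wire the very last slot */
	urb = urb % MMUCR_URB_NENTRIES;
	mmucr = status | ((unsigned long)urb << MMUCR_URB_SHIFT);
}

/* Mirror of the unwiring step: raise the boundary again (LIFO order). */
static void model_unwire(void)
{
	unsigned long status = mmucr;
	int urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;

	status &= ~MMUCR_URB;
	assert(urb != MMUCR_URB_NENTRIES);	/* nothing left to unwire */
	urb = (urb + 1) % MMUCR_URB_NENTRIES;
	mmucr = status | ((unsigned long)urb << MMUCR_URB_SHIFT);
}

int main(void)
{
	model_wire();		/* wire entry N    -> URB 62 */
	model_wire();		/* wire entry N+1  -> URB 61 */
	model_unwire();		/* unwire N+1      -> URB 62 */
	model_unwire();		/* unwire N        -> URB 63 */
	printf("URB = %lu\n", (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT);
	return 0;
}

Built with a hosted C compiler this prints "URB = 63", i.e. two wire operations followed by two unwire operations in reverse order restore the original boundary, matching the stack/LIFO behaviour described in the tlb_unwire_entry() comment.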