Commit 03c1e88

---
r: 181095
b: refs/heads/master
c: 8eda551
h: refs/heads/master
i:
  181093: 5b2362a
  181091: 34b557d
  181087: ccde265
v: v3
Matt Fleming committed Jan 16, 2010
1 parent 06d1885 commit 03c1e88
Showing 9 changed files with 208 additions and 349 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 4291b730cd0f0cf98a90d946b6cabbd804397350
+refs/heads/master: 8eda55142080f0373b1f0268fe6d6807f193e713
4 changes: 4 additions & 0 deletions trunk/arch/sh/include/asm/pgtable_32.h
@@ -71,6 +71,8 @@
#define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */
#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */

+#define _PAGE_EXT_WIRED 0x4000 /* software: Wire TLB entry */
+
/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)

@@ -164,6 +166,8 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
_PAGE_DIRTY | _PAGE_SPECIAL)

+#define _PAGE_WIRED (_PAGE_EXT(_PAGE_EXT_WIRED))
+
#ifndef __ASSEMBLY__

#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
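The new flag sits in the extended (EPR) half of the PTE, so _PAGE_WIRED is only meaningful on parts using the 64-bit SH-X2 PTE layout. As a rough sketch of how a caller might fold it into a protection value before wiring a mapping (illustrative only, not code from this commit; phys is a hypothetical physical address):

	/* Hypothetical: build a kernel PTE carrying the software wire flag. */
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_WIRED);
	pte_t pte = pfn_pte(phys >> PAGE_SHIFT, prot);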
16 changes: 16 additions & 0 deletions trunk/arch/sh/include/asm/tlb.h
@@ -97,6 +97,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)

#define tlb_migrate_finish(mm) do { } while (0)

+#ifdef CONFIG_CPU_SH4
+extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
+extern void tlb_unwire_entry(void);
+#else
+static inline void tlb_wire_entry(struct vm_area_struct *vma,
+				  unsigned long addr, pte_t pte)
+{
+	BUG();
+}
+
+static inline void tlb_unwire_entry(void)
+{
+	BUG();
+}
+#endif /* CONFIG_CPU_SH4 */
+
#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma) do { } while (0)
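These declarations are the whole wiring API: SH-4 gets real implementations (the externs are satisfied outside this header), while every other CPU type hits BUG() so a stray caller is caught immediately. A minimal, hypothetical use might look like this (names are illustrative, not from this commit):

	/* Hypothetical: pin a translation around TLB-miss-sensitive code. */
	static void run_wired(struct vm_area_struct *vma,
			      unsigned long addr, pte_t pte)
	{
		tlb_wire_entry(vma, addr, pte);	/* load and wire a UTLB slot */

		/* ... code that must not take a TLB miss on addr ... */

		tlb_unwire_entry();		/* release the last wired entry */
	}

Since tlb_unwire_entry() takes no arguments, wired entries can evidently only be released in LIFO order.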
4 changes: 4 additions & 0 deletions trunk/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -25,6 +25,10 @@

#define MMUCR_TI (1<<2)

+#define MMUCR_URB 0x00FC0000
+#define MMUCR_URB_SHIFT 18
+#define MMUCR_URB_NENTRIES 64
+
#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
#define MMUCR_SE (1 << 4)
#else
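URB is the UTLB Replace Boundary field of MMUCR: hardware random replacement only uses UTLB entries below the boundary, so lowering URB by one takes the topmost eligible entry out of replacement and reserves it for a wired translation. A sketch of the read-modify-write this enables (assumed usage, not code from this commit):

	/* Sketch: reserve one UTLB entry by lowering the replace boundary. */
	static void urb_reserve_one(void)
	{
		unsigned long mmucr = __raw_readl(MMUCR);
		unsigned long urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;

		mmucr &= ~MMUCR_URB;
		mmucr |= (urb - 1) << MMUCR_URB_SHIFT;	/* assumes urb > 0 */
		__raw_writel(mmucr, MMUCR);
	}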
2 changes: 0 additions & 2 deletions trunk/arch/sh/kernel/cpu/fpu.c
@@ -56,7 +56,6 @@ void fpu_state_restore(struct pt_regs *regs)
}

if (!tsk_used_math(tsk)) {
-	local_irq_enable();
/*
* does a slab alloc which can sleep
*/
@@ -67,7 +66,6 @@ void fpu_state_restore(struct pt_regs *regs)
do_group_exit(SIGKILL);
return;
}
-	local_irq_disable();
}

grab_fpu(regs);
241 changes: 0 additions & 241 deletions trunk/arch/sh/kernel/head_32.S
@@ -3,7 +3,6 @@
* arch/sh/kernel/head.S
*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
- * Copyright (C) 2010 Matt Fleming
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -85,236 +84,6 @@ ENTRY(_stext)
ldc r0, r7_bank ! ... and initial thread_info
#endif

-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
-/*
- * Reconfigure the initial PMB mappings set up by the hardware.
- *
- * When we boot in 32-bit MMU mode there are 2 PMB entries already
- * set up for us.
- *
- * Entry  VPN         PPN         V  SZ     C  UB  WT
- * ---------------------------------------------------------------
- * 0      0x80000000  0x00000000  1  512MB  1  0   1
- * 1      0xA0000000  0x00000000  1  512MB  0  0   0
- *
- * But we reprogram them here because we want complete control over
- * our address space and the initial mappings may not map PAGE_OFFSET
- * to __MEMORY_START (or even map all of our RAM).
- *
- * Once we've set up cached and uncached mappings for all of RAM we
- * clear the rest of the PMB entries.
- *
- * This clearing also deals with the fact that PMB entries can persist
- * across reboots. The PMB could have been left in any state when the
- * reboot occurred, so to be safe we clear all entries and start
- * with a clean slate.
- */
-
-	mov.l	.LMMUCR, r1	/* Flush the TLB */
-	mov.l	@r1, r0
-	or	#MMUCR_TI, r0
-	mov.l	r0, @r1
-
-	mov.l	.LMEMORY_SIZE, r5
-	mov	r5, r7
-
-	mov	#PMB_E_SHIFT, r0
-	mov	#0x1, r4
-	shld	r0, r4
-
-	mov.l	.LFIRST_DATA_ENTRY, r0
-	mov.l	.LPMB_DATA, r1
-	mov.l	.LFIRST_ADDR_ENTRY, r2
-	mov.l	.LPMB_ADDR, r3
-
-	mov	#0, r10
-
-	/*
-	 * r0 = PMB_DATA data field
-	 * r1 = PMB_DATA address field
-	 * r2 = PMB_ADDR data field
-	 * r3 = PMB_ADDR address field
-	 * r4 = PMB_E_SHIFT
-	 * r5 = remaining amount of RAM to map
-	 * r6 = PMB mapping size we're trying to use
-	 * r7 = cached_to_uncached
-	 * r8 = scratch register
-	 * r9 = scratch register
-	 * r10 = number of PMB entries we've set up
-	 */
-.L512:
-	mov	#(512 >> 4), r6
-	shll16	r6
-	shll8	r6
-
-	cmp/hi	r5, r6
-	bt	.L128
-
-	mov	#(PMB_SZ_512M >> 2), r9
-	shll2	r9
-
-	/*
-	 * Cached mapping
-	 */
-	mov	#PMB_C, r8
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov.l	r2, @r3
-
-	add	r4, r1	/* Increment to the next PMB_DATA entry */
-	add	r4, r3	/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10	/* Increment number of PMB entries */
-
-	/*
-	 * Uncached mapping
-	 */
-	mov	#(PMB_UB >> 8), r8
-	shll8	r8
-
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov	r2, r8
-	add	r7, r8
-	mov.l	r8, @r3
-
-	add	r4, r1	/* Increment to the next PMB_DATA entry */
-	add	r4, r3	/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10	/* Increment number of PMB entries */
-
-	sub	r6, r5
-	add	r6, r0
-	add	r6, r2
-
-	bra	.L512
-
-.L128:
-	mov	#(128 >> 4), r6
-	shll16	r6
-	shll8	r6
-
-	cmp/hi	r5, r6
-	bt	.L64
-
-	mov	#(PMB_SZ_128M >> 2), r9
-	shll2	r9
-
-	/*
-	 * Cached mapping
-	 */
-	mov	#PMB_C, r8
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov.l	r2, @r3
-
-	add	r4, r1	/* Increment to the next PMB_DATA entry */
-	add	r4, r3	/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10	/* Increment number of PMB entries */
-
-	/*
-	 * Uncached mapping
-	 */
-	mov	#(PMB_UB >> 8), r8
-	shll8	r8
-
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov	r2, r8
-	add	r7, r8
-	mov.l	r8, @r3
-
-	add	r4, r1	/* Increment to the next PMB_DATA entry */
-	add	r4, r3	/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10	/* Increment number of PMB entries */
-
-	sub	r6, r5
-	add	r6, r0
-	add	r6, r2
-
-	bra	.L128
-
-.L64:
-	mov	#(64 >> 4), r6
-	shll16	r6
-	shll8	r6
-
-	cmp/hi	r5, r6
-	bt	.Ldone
-
-	mov	#(PMB_SZ_64M >> 2), r9
-	shll2	r9
-
-	/*
-	 * Cached mapping
-	 */
-	mov	#PMB_C, r8
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov.l	r2, @r3
-
-	add	r4, r1	/* Increment to the next PMB_DATA entry */
-	add	r4, r3	/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10	/* Increment number of PMB entries */
-
-	/*
-	 * Uncached mapping
-	 */
-	mov	#(PMB_UB >> 8), r8
-	shll8	r8
-
-	or	r0, r8
-	or	r9, r8
-	mov.l	r8, @r1
-	mov	r2, r8
-	add	r7, r8
-	mov.l	r8, @r3
-
-	add	r4, r1	/* Increment to the next PMB_DATA entry */
-	add	r4, r3	/* Increment to the next PMB_ADDR entry */
-
-	add	#1, r10	/* Increment number of PMB entries */
-
-	sub	r6, r5
-	add	r6, r0
-	add	r6, r2
-
-	bra	.L64
-
-.Ldone:
-	/* Update cached_to_uncached */
-	mov.l	.Lcached_to_uncached, r0
-	mov.l	r7, @r0
-
-	/*
-	 * Clear the remaining PMB entries.
-	 *
-	 * r3 = entry to begin clearing from
-	 * r10 = number of entries we've set up so far
-	 */
-	mov	#0, r1
-	mov	#PMB_ENTRY_MAX, r0
-
-.Lagain:
-	mov.l	r1, @r3	/* Clear PMB_ADDR entry */
-	add	#1, r10	/* Increment the loop counter */
-	cmp/eq	r0, r10
-	bf/s	.Lagain
-	add	r4, r3	/* Increment to the next PMB_ADDR entry */
-
-	mov.l	6f, r0
-	icbi	@r0
-
-#endif /* !CONFIG_PMB_LEGACY */
-
#ifndef CONFIG_SH_NO_BSS_INIT
/*
* Don't clear BSS if running on slow platforms such as an RTL simulation,
@@ -364,13 +133,3 @@ ENTRY(stack_start)
5: .long start_kernel
6: .long sh_cpu_init
7: .long init_thread_union
-
-#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
-.LPMB_ADDR:		.long	PMB_ADDR
-.LPMB_DATA:		.long	PMB_DATA
-.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
-.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
-.LMMUCR:		.long	MMUCR
-.Lcached_to_uncached:	.long	cached_to_uncached
-.LMEMORY_SIZE:		.long	__MEMORY_SIZE
-#endif
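The three unrolled .L512/.L128/.L64 blocks above implemented a single greedy algorithm: while RAM remains, map it with the largest PMB page size that still fits (512MB, then 128MB, then 64MB), emitting a cached entry and an uncached twin (offset by cached_to_uncached) on each pass, then clear the leftover PMB slots. Restated in C for readability (an illustrative sketch of the deleted assembly; pmb_write() is a made-up helper, not a kernel function):

	/* Sketch of the removed mapping loop; pmb_write() is hypothetical. */
	static const struct {
		unsigned long size;
		unsigned long flags;	/* PMB_SZ_* page-size bits */
	} pmb_sizes[] = {
		{ 512 << 20, PMB_SZ_512M },
		{ 128 << 20, PMB_SZ_128M },
		{  64 << 20, PMB_SZ_64M  },
	};

	static int pmb_map_all_ram(unsigned long left)
	{
		unsigned long vpn = PAGE_OFFSET, ppn = __MEMORY_START;
		int i, entry = 0;

		for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			while (left >= pmb_sizes[i].size) {
				/* Cached mapping */
				pmb_write(entry++, vpn | PMB_V, ppn,
					  pmb_sizes[i].flags | PMB_C);
				/* Uncached mapping of the same physical region */
				pmb_write(entry++, (vpn + cached_to_uncached) | PMB_V,
					  ppn, pmb_sizes[i].flags | PMB_UB);

				vpn += pmb_sizes[i].size;
				ppn += pmb_sizes[i].size;
				left -= pmb_sizes[i].size;
			}
		}
		return entry;	/* callers clear slots entry..PMB_ENTRY_MAX-1 */
	}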