From aa547e0f6f34333095bb3de641339a09fb990beb Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sun, 11 Oct 2009 17:56:17 +0100
Subject: [PATCH]

--- yaml ---
r: 173394
b: refs/heads/master
c: d26cddbbd23b81eac4fcf340b633e97b40b8d3a1
h: refs/heads/master
v: v3
---
 [refs]                                     |    2 +-
 trunk/arch/sh/boot/compressed/misc.c       |    2 +-
 trunk/arch/sh/include/asm/addrspace.h      |    9 +-
 trunk/arch/sh/include/asm/dwarf.h          |   16 --
 trunk/arch/sh/include/asm/ftrace.h         |   47 ++++
 trunk/arch/sh/include/asm/io.h             |    4 +-
 trunk/arch/sh/include/asm/mmu.h            |   13 +-
 trunk/arch/sh/include/asm/pgtable.h        |   26 +-
 trunk/arch/sh/include/asm/pgtable_32.h     |    2 +-
 trunk/arch/sh/include/asm/scatterlist.h    |    2 +-
 trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c |   28 +--
 trunk/arch/sh/kernel/dwarf.c               |  185 ++++----------
 trunk/arch/sh/kernel/head_32.S             |    2 +-
 trunk/arch/sh/kernel/machine_kexec.c       |    2 +-
 trunk/arch/sh/kernel/module.c              |   32 ---
 trunk/arch/sh/kernel/setup.c               |    4 -
 trunk/arch/sh/mm/Kconfig                   |    2 +
 trunk/arch/sh/mm/Makefile                  |    3 +-
 trunk/arch/sh/mm/cache-sh4.c               |    8 +-
 trunk/arch/sh/mm/cache-sh7705.c            |    2 +-
 trunk/arch/sh/mm/consistent.c              |    2 +-
 trunk/arch/sh/mm/init.c                    |    8 -
 trunk/arch/sh/mm/pmb-fixed.c               |   45 ++++
 trunk/arch/sh/mm/pmb.c                     |  268 ++++++++++-----------
 24 files changed, 319 insertions(+), 395 deletions(-)
 create mode 100644 trunk/arch/sh/mm/pmb-fixed.c

diff --git a/[refs] b/[refs]
index 52f92e4f9323..5d64bfd23535 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8ec006c58775869175edee3d23f4525b6df2935a
+refs/heads/master: d26cddbbd23b81eac4fcf340b633e97b40b8d3a1
diff --git a/trunk/arch/sh/boot/compressed/misc.c b/trunk/arch/sh/boot/compressed/misc.c
index b51b1fc4baae..fd56a71ca9d9 100644
--- a/trunk/arch/sh/boot/compressed/misc.c
+++ b/trunk/arch/sh/boot/compressed/misc.c
@@ -131,7 +131,7 @@ void decompress_kernel(void)
 #ifdef CONFIG_SUPERH64
 	output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
-	output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
+	output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
 #ifdef CONFIG_29BIT
 	output_addr |= P2SEG;
 #endif
diff --git a/trunk/arch/sh/include/asm/addrspace.h b/trunk/arch/sh/include/asm/addrspace.h
index 99d6b3ecbe22..80d40813e057 100644
--- a/trunk/arch/sh/include/asm/addrspace.h
+++ b/trunk/arch/sh/include/asm/addrspace.h
@@ -28,6 +28,9 @@
 /* Returns the privileged segment base of a given address */
 #define PXSEG(a)	(((unsigned long)(a)) & 0xe0000000)
 
+/* Returns the physical address of a PnSEG (n=1,2) address */
+#define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
+
 #if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
 /*
  * Map an address to a certain privileged segment
@@ -57,11 +60,5 @@
 #define P3_ADDR_MAX		P4SEG
 #endif
 
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_PMB
-extern int __in_29bit_mode(void);
-#endif /* CONFIG_PMB */
-#endif /* __ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/trunk/arch/sh/include/asm/dwarf.h b/trunk/arch/sh/include/asm/dwarf.h
index fc51e66f2380..ced6795891a6 100644
--- a/trunk/arch/sh/include/asm/dwarf.h
+++ b/trunk/arch/sh/include/asm/dwarf.h
@@ -241,12 +241,6 @@ struct dwarf_cie {
 
 	unsigned long flags;
 #define DWARF_CIE_Z_AUGMENTATION	(1 << 0)
-
-	/*
-	 * 'mod' will be non-NULL if this CIE came from a module's
-	 * .eh_frame section.
-	 */
-	struct module *mod;
 };
 
 /**
@@ -261,12 +255,6 @@ struct dwarf_fde {
 	unsigned char *instructions;
 	unsigned char *end;
 	struct list_head link;
-
-	/*
-	 * 'mod' will be non-NULL if this FDE came from a module's
-	 * .eh_frame section.
- */ - struct module *mod; }; /** @@ -376,10 +364,6 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); -extern void dwarf_free_frame(struct dwarf_frame *); -extern int dwarf_parse_section(char *, char *, struct module *); -extern void dwarf_module_unload(struct module *); - #endif /* !__ASSEMBLY__ */ #define CFI_STARTPROC .cfi_startproc diff --git a/trunk/arch/sh/include/asm/ftrace.h b/trunk/arch/sh/include/asm/ftrace.h index 12f3a31f20af..5ea9030725c0 100644 --- a/trunk/arch/sh/include/asm/ftrace.h +++ b/trunk/arch/sh/include/asm/ftrace.h @@ -32,6 +32,53 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) return addr; } + +#ifdef CONFIG_DWARF_UNWINDER +#include + +#define HAVE_ARCH_CALLER_ADDR + +static inline unsigned long dwarf_return_address(int depth) +{ + struct dwarf_frame *frame; + unsigned long ra; + int i; + + for (i = 0, frame = NULL, ra = 0; i <= depth; i++) { + struct dwarf_frame *tmp; + + tmp = dwarf_unwind_stack(ra, frame); + + if (frame) + dwarf_free_frame(frame); + + frame = tmp; + + if (!frame || !frame->return_addr) + break; + + ra = frame->return_addr; + } + + /* Failed to unwind the stack to the specified depth. */ + WARN_ON(i != depth + 1); + + if (frame) + dwarf_free_frame(frame); + + return ra; +} + +#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +#define CALLER_ADDR1 dwarf_return_address(1) +#define CALLER_ADDR2 dwarf_return_address(2) +#define CALLER_ADDR3 dwarf_return_address(3) +#define CALLER_ADDR4 dwarf_return_address(4) +#define CALLER_ADDR5 dwarf_return_address(5) +#define CALLER_ADDR6 dwarf_return_address(6) + +#endif /* CONFIG_DWARF_UNWINDER */ + #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/trunk/arch/sh/include/asm/io.h b/trunk/arch/sh/include/asm/io.h index 0cf2a5708e26..5be45ea4dfec 100644 --- a/trunk/arch/sh/include/asm/io.h +++ b/trunk/arch/sh/include/asm/io.h @@ -246,7 +246,7 @@ void __iounmap(void __iomem *addr); static inline void __iomem * __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) { -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) unsigned long last_addr = offset + size - 1; #endif void __iomem *ret; @@ -255,7 +255,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) if (ret) return ret; -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. 
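
The hunks above lean on the SH 29-bit segmenting that PHYSADDR(), reintroduced in the addrspace.h hunk earlier, encodes. As a quick illustration, here is a small stand-alone C program, not part of the patch, showing the arithmetic; the 0x1fffffff mask comes from the patch itself, while the P1SEG/P2SEG base values (0x80000000 and 0xa0000000, the usual SH-4 values) are assumed for the example.

#include <stdio.h>

#define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
#define P1SEG		0x80000000UL	/* cached, privileged (assumed value) */
#define P2SEG		0xa0000000UL	/* uncached, privileged (assumed value) */

int main(void)
{
	unsigned long text = 0x80201000UL;	/* hypothetical P1 kernel address */

	/* decompress_kernel() above strips the segment bits, then ORs
	 * P2SEG back in on 29-bit parts so the copy runs through the
	 * uncached window: */
	printf("physical: 0x%08lx\n", PHYSADDR(text));		/* 0x00201000 */
	printf("P2 alias: 0x%08lx\n", PHYSADDR(text) | P2SEG);	/* 0xa0201000 */

	/* machine_kexec_prepare() below relies on the same identity to
	 * detect virtual entry points: a P1 or P2 address is never
	 * numerically equal to its physical counterpart. */
	printf("virt == phys? %s\n", text == PHYSADDR(text) ? "yes" : "no");
	return 0;
}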
diff --git a/trunk/arch/sh/include/asm/mmu.h b/trunk/arch/sh/include/asm/mmu.h index c7426ad9926e..f5963037c9d6 100644 --- a/trunk/arch/sh/include/asm/mmu.h +++ b/trunk/arch/sh/include/asm/mmu.h @@ -7,16 +7,12 @@ #define PMB_PASCR 0xff000070 #define PMB_IRMCR 0xff000078 -#define PASCR_SE 0x80000000 - #define PMB_ADDR 0xf6100000 #define PMB_DATA 0xf7100000 #define PMB_ENTRY_MAX 16 #define PMB_E_MASK 0x0000000f #define PMB_E_SHIFT 8 -#define PMB_PFN_MASK 0xff000000 - #define PMB_SZ_16M 0x00000000 #define PMB_SZ_64M 0x00000010 #define PMB_SZ_128M 0x00000080 @@ -66,10 +62,17 @@ struct pmb_entry { }; /* arch/sh/mm/pmb.c */ +int __set_pmb_entry(unsigned long vpn, unsigned long ppn, + unsigned long flags, int *entry); +int set_pmb_entry(struct pmb_entry *pmbe); +void clear_pmb_entry(struct pmb_entry *pmbe); +struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, + unsigned long flags); +void pmb_free(struct pmb_entry *pmbe); long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags); void pmb_unmap(unsigned long addr); -int pmb_init(void); #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ + diff --git a/trunk/arch/sh/include/asm/pgtable.h b/trunk/arch/sh/include/asm/pgtable.h index ba3046e4f06f..4f3efa7d5a64 100644 --- a/trunk/arch/sh/include/asm/pgtable.h +++ b/trunk/arch/sh/include/asm/pgtable.h @@ -75,31 +75,13 @@ static inline unsigned long long neff_sign_extend(unsigned long val) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 -#define PHYS_ADDR_MASK29 0x1fffffff -#define PHYS_ADDR_MASK32 0xffffffff - -#ifdef CONFIG_PMB -static inline unsigned long phys_addr_mask(void) -{ - /* Is the MMU in 29bit mode? */ - if (__in_29bit_mode()) - return PHYS_ADDR_MASK29; - - return PHYS_ADDR_MASK32; -} -#elif defined(CONFIG_32BIT) -static inline unsigned long phys_addr_mask(void) -{ - return PHYS_ADDR_MASK32; -} +#ifdef CONFIG_32BIT +#define PHYS_ADDR_MASK 0xffffffff #else -static inline unsigned long phys_addr_mask(void) -{ - return PHYS_ADDR_MASK29; -} +#define PHYS_ADDR_MASK 0x1fffffff #endif -#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK) +#define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) #define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) #ifdef CONFIG_SUPERH32 diff --git a/trunk/arch/sh/include/asm/pgtable_32.h b/trunk/arch/sh/include/asm/pgtable_32.h index b35435516203..c0d359ce337b 100644 --- a/trunk/arch/sh/include/asm/pgtable_32.h +++ b/trunk/arch/sh/include/asm/pgtable_32.h @@ -108,7 +108,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x) #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) #endif -#define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS)) +#define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS)) /* Hardware flags, page size encoding */ #if !defined(CONFIG_MMU) diff --git a/trunk/arch/sh/include/asm/scatterlist.h b/trunk/arch/sh/include/asm/scatterlist.h index e38d1d4c7f6f..327cc2e4c97b 100644 --- a/trunk/arch/sh/include/asm/scatterlist.h +++ b/trunk/arch/sh/include/asm/scatterlist.h @@ -1,7 +1,7 @@ #ifndef __ASM_SH_SCATTERLIST_H #define __ASM_SH_SCATTERLIST_H -#define ISA_DMA_THRESHOLD phys_addr_mask() +#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK #include diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 485330cf8549..e848443deeb9 100644 --- a/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-shx3.c @@ -268,11 +268,7 @@ enum { UNUSED 
= 0, /* interrupt sources */ - IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, - IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, - IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, - IRL_HHLL, IRL_HHLH, IRL_HHHL, - IRQ0, IRQ1, IRQ2, IRQ3, + IRL, IRQ0, IRQ1, IRQ2, IRQ3, HUDII, TMU0, TMU1, TMU2, TMU3, TMU4, TMU5, PCII0, PCII1, PCII2, PCII3, PCII4, @@ -295,7 +291,7 @@ enum { INTICI4, INTICI5, INTICI6, INTICI7, /* interrupt groups */ - IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, + PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, DMAC0, DMAC1, }; @@ -348,10 +344,6 @@ static struct intc_vect vectors[] __initdata = { }; static struct intc_group groups[] __initdata = { - INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, - IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, - IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, - IRL_HHLL, IRL_HHLH, IRL_HHHL), INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), @@ -427,14 +419,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups, /* External interrupt pins in IRL mode */ static struct intc_vect vectors_irl[] __initdata = { - INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), - INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), - INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), - INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), - INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), - INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), - INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), - INTC_VECT(IRL_HHHL, 0x3c0), + INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220), + INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260), + INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0), + INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0), + INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320), + INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360), + INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0), + INTC_VECT(IRL, 0x3c0), }; static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups, diff --git a/trunk/arch/sh/kernel/dwarf.c b/trunk/arch/sh/kernel/dwarf.c index c274039e9c8d..03b3616c80a5 100644 --- a/trunk/arch/sh/kernel/dwarf.c +++ b/trunk/arch/sh/kernel/dwarf.c @@ -529,18 +529,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, } /** - * dwarf_free_frame - free the memory allocated for @frame - * @frame: the frame to free - */ -void dwarf_free_frame(struct dwarf_frame *frame) -{ - dwarf_frame_free_regs(frame); - mempool_free(frame, dwarf_frame_pool); -} - -/** - * dwarf_unwind_stack - unwind the stack - * + * dwarf_unwind_stack - recursively unwind the stack * @pc: address of the function to unwind * @prev: struct dwarf_frame of the previous stackframe on the callstack * @@ -558,9 +547,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, unsigned long addr; /* - * If we're starting at the top of the stack we need get the - * contents of a physical register to get the CFA in order to - * begin the virtual unwinding of the stack. + * If this is the first invocation of this recursive function we + * need get the contents of a physical register to get the CFA + * in order to begin the virtual unwinding of the stack. * * NOTE: the return address is guaranteed to be setup by the * time this function makes its first function call. @@ -582,8 +571,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, fde = dwarf_lookup_fde(pc); if (!fde) { /* - * This is our normal exit path. 
There are two reasons
-		 * why we might exit here,
+		 * This is our normal exit path - the one that stops the
+		 * recursion. There are two reasons why we might exit
+		 * here,
 		 *
 		 *  a) pc has no asscociated DWARF frame info and so
 		 *     we don't know how to unwind this frame. This is
@@ -625,10 +615,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 		} else {
 			/*
-			 * Again, we're starting from the top of the
-			 * stack. We need to physically read
-			 * the contents of a register in order to get
-			 * the Canonical Frame Address for this
+			 * Again, this is the first invocation of this
+			 * recursive function. We need to physically
+			 * read the contents of a register in order to
+			 * get the Canonical Frame Address for this
 			 * function.
 			 */
 			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -658,12 +648,13 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	return frame;
 
 bail:
-	dwarf_free_frame(frame);
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
 	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-			   unsigned char *end, struct module *mod)
+			   unsigned char *end)
 {
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -759,8 +750,6 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->initial_instructions = p;
 	cie->instructions_end = end;
 
-	cie->mod = mod;
-
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 	list_add_tail(&cie->link, &dwarf_cie_list);
@@ -771,7 +760,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
-			   unsigned char *end, struct module *mod)
+			   unsigned char *end)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -820,8 +809,6 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	fde->instructions = p;
 	fde->end = end;
 
-	fde->mod = mod;
-
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 	list_add_tail(&fde->link, &dwarf_fde_list);
@@ -845,8 +832,10 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 	while (1) {
 		frame = dwarf_unwind_stack(return_addr, _frame);
 
-		if (_frame)
-			dwarf_free_frame(_frame);
+		if (_frame) {
+			dwarf_frame_free_regs(_frame);
+			mempool_free(_frame, dwarf_frame_pool);
+		}
 
 		_frame = frame;
 
 		if (!frame || !frame->return_addr)
 			break;
 
 		return_addr = frame->return_addr;
 		ops->address(data, return_addr, 1);
 	}
-
-	if (frame)
-		dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
@@ -888,15 +874,15 @@ static void dwarf_unwinder_cleanup(void)
 }
 
 /**
- * dwarf_parse_section - parse DWARF section
- * @eh_frame_start: start address of the .eh_frame section
- * @eh_frame_end: end address of the .eh_frame section
- * @mod: the kernel module containing the .eh_frame section
+ * dwarf_unwinder_init - initialise the dwarf unwinder
 *
- * Parse the information in a .eh_frame section.
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
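+ *
+ * (A CIE carries the initial unwind instructions shared by a group
+ * of functions; each FDE covers one function's address range and
+ * refers back to its parent CIE.)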
*/ -int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, - struct module *mod) +static int __init dwarf_unwinder_init(void) { u32 entry_type; void *p, *entry; @@ -904,12 +890,32 @@ int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, unsigned long len; unsigned int c_entries, f_entries; unsigned char *end; + INIT_LIST_HEAD(&dwarf_cie_list); + INIT_LIST_HEAD(&dwarf_fde_list); c_entries = 0; f_entries = 0; - entry = eh_frame_start; + entry = &__start_eh_frame; + + dwarf_frame_cachep = kmem_cache_create("dwarf_frames", + sizeof(struct dwarf_frame), 0, + SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + + dwarf_reg_cachep = kmem_cache_create("dwarf_regs", + sizeof(struct dwarf_reg), 0, + SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + + dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, + mempool_alloc_slab, + mempool_free_slab, + dwarf_frame_cachep); - while ((char *)entry < eh_frame_end) { + dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ, + mempool_alloc_slab, + mempool_free_slab, + dwarf_reg_cachep); + + while ((char *)entry < __stop_eh_frame) { p = entry; count = dwarf_entry_len(p, &len); @@ -921,7 +927,6 @@ int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, * entry and move to the next one because 'len' * tells us where our next entry is. */ - err = -EINVAL; goto out; } else p += count; @@ -933,14 +938,13 @@ int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, p += 4; if (entry_type == DW_EH_FRAME_CIE) { - err = dwarf_parse_cie(entry, p, len, end, mod); + err = dwarf_parse_cie(entry, p, len, end); if (err < 0) goto out; else c_entries++; } else { - err = dwarf_parse_fde(entry, entry_type, p, len, - end, mod); + err = dwarf_parse_fde(entry, entry_type, p, len, end); if (err < 0) goto out; else @@ -953,95 +957,6 @@ int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", c_entries, f_entries); - return 0; - -out: - return err; -} - -/** - * dwarf_module_unload - remove FDE/CIEs associated with @mod - * @mod: the module that is being unloaded - * - * Remove any FDEs and CIEs from the global lists that came from - * @mod's .eh_frame section because @mod is being unloaded. - */ -void dwarf_module_unload(struct module *mod) -{ - struct dwarf_fde *fde; - struct dwarf_cie *cie; - unsigned long flags; - - spin_lock_irqsave(&dwarf_cie_lock, flags); - -again_cie: - list_for_each_entry(cie, &dwarf_cie_list, link) { - if (cie->mod == mod) - break; - } - - if (&cie->link != &dwarf_cie_list) { - list_del(&cie->link); - kfree(cie); - goto again_cie; - } - - spin_unlock_irqrestore(&dwarf_cie_lock, flags); - - spin_lock_irqsave(&dwarf_fde_lock, flags); - -again_fde: - list_for_each_entry(fde, &dwarf_fde_list, link) { - if (fde->mod == mod) - break; - } - - if (&fde->link != &dwarf_fde_list) { - list_del(&fde->link); - kfree(fde); - goto again_fde; - } - - spin_unlock_irqrestore(&dwarf_fde_lock, flags); -} - -/** - * dwarf_unwinder_init - initialise the dwarf unwinder - * - * Build the data structures describing the .dwarf_frame section to - * make it easier to lookup CIE and FDE entries. Because the - * .eh_frame section is packed as tightly as possible it is not - * easy to lookup the FDE for a given PC, so we build a list of FDE - * and CIE entries that make it easier. 
- */ -static int __init dwarf_unwinder_init(void) -{ - int err; - INIT_LIST_HEAD(&dwarf_cie_list); - INIT_LIST_HEAD(&dwarf_fde_list); - - dwarf_frame_cachep = kmem_cache_create("dwarf_frames", - sizeof(struct dwarf_frame), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); - - dwarf_reg_cachep = kmem_cache_create("dwarf_regs", - sizeof(struct dwarf_reg), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); - - dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, - mempool_alloc_slab, - mempool_free_slab, - dwarf_frame_cachep); - - dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ, - mempool_alloc_slab, - mempool_free_slab, - dwarf_reg_cachep); - - err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL); - if (err) - goto out; - err = unwinder_register(&dwarf_unwinder); if (err) goto out; diff --git a/trunk/arch/sh/kernel/head_32.S b/trunk/arch/sh/kernel/head_32.S index 1151ecdffa71..a78be74b8d3e 100644 --- a/trunk/arch/sh/kernel/head_32.S +++ b/trunk/arch/sh/kernel/head_32.S @@ -33,7 +33,7 @@ ENTRY(empty_zero_page) .long 1 /* LOADER_TYPE */ .long 0x00000000 /* INITRD_START */ .long 0x00000000 /* INITRD_SIZE */ -#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED) +#ifdef CONFIG_32BIT .long 0x53453f00 + 32 /* "SE?" = 32 bit */ #else .long 0x53453f00 + 29 /* "SE?" = 29 bit */ diff --git a/trunk/arch/sh/kernel/machine_kexec.c b/trunk/arch/sh/kernel/machine_kexec.c index de7cf5477d3f..7ea2704ea033 100644 --- a/trunk/arch/sh/kernel/machine_kexec.c +++ b/trunk/arch/sh/kernel/machine_kexec.c @@ -49,7 +49,7 @@ int machine_kexec_prepare(struct kimage *image) /* older versions of kexec-tools are passing * the zImage entry point as a virtual address. */ - if (image->start != __pa(image->start)) + if (image->start != PHYSADDR(image->start)) return -EINVAL; /* upgrade your kexec-tools */ return 0; diff --git a/trunk/arch/sh/kernel/module.c b/trunk/arch/sh/kernel/module.c index d297a148d16c..c2efdcde266f 100644 --- a/trunk/arch/sh/kernel/module.c +++ b/trunk/arch/sh/kernel/module.c @@ -32,7 +32,6 @@ #include #include #include -#include void *module_alloc(unsigned long size) { @@ -146,41 +145,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { -#ifdef CONFIG_DWARF_UNWINDER - unsigned int i, err; - unsigned long start, end; - char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - - start = end = 0; - - for (i = 1; i < hdr->e_shnum; i++) { - /* Alloc bit cleared means "ignore it." */ - if ((sechdrs[i].sh_flags & SHF_ALLOC) - && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) { - start = sechdrs[i].sh_addr; - end = start + sechdrs[i].sh_size; - break; - } - } - - /* Did we find the .eh_frame section? 
*/ - if (i != hdr->e_shnum) { - err = dwarf_parse_section((char *)start, (char *)end, me); - if (err) - printk(KERN_WARNING "%s: failed to parse DWARF info\n", - me->name); - } - -#endif /* CONFIG_DWARF_UNWINDER */ - return module_bug_finalize(hdr, sechdrs, me); } void module_arch_cleanup(struct module *mod) { module_bug_cleanup(mod); - -#ifdef CONFIG_DWARF_UNWINDER - dwarf_module_unload(mod); -#endif /* CONFIG_DWARF_UNWINDER */ } diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index df65fe2d43b8..f9d44f8e0df6 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -453,10 +453,6 @@ void __init setup_arch(char **cmdline_p) paging_init(); -#ifdef CONFIG_PMB_ENABLE - pmb_init(); -#endif - #ifdef CONFIG_SMP plat_smp_setup(); #endif diff --git a/trunk/arch/sh/mm/Kconfig b/trunk/arch/sh/mm/Kconfig index b8a9032c74be..64dc1ad59801 100644 --- a/trunk/arch/sh/mm/Kconfig +++ b/trunk/arch/sh/mm/Kconfig @@ -83,6 +83,7 @@ config 32BIT config PMB_ENABLE bool "Support 32-bit physical addressing through PMB" depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) + select 32BIT default y help If you say Y here, physical addressing will be extended to @@ -97,6 +98,7 @@ choice config PMB bool "PMB" depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) + select 32BIT help If you say Y here, physical addressing will be extended to 32-bits through the SH-4A PMB. If this is not set, legacy diff --git a/trunk/arch/sh/mm/Makefile b/trunk/arch/sh/mm/Makefile index 8a70535fa7ce..3759bf853293 100644 --- a/trunk/arch/sh/mm/Makefile +++ b/trunk/arch/sh/mm/Makefile @@ -33,7 +33,8 @@ obj-y += $(tlb-y) endif obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PMB_ENABLE) += pmb.o +obj-$(CONFIG_PMB) += pmb.o +obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o obj-$(CONFIG_NUMA) += numa.o # Special flags for fault_64.o. This puts restrictions on the number of diff --git a/trunk/arch/sh/mm/cache-sh4.c b/trunk/arch/sh/mm/cache-sh4.c index 56dd55a1b13e..60588c5bf7f9 100644 --- a/trunk/arch/sh/mm/cache-sh4.c +++ b/trunk/arch/sh/mm/cache-sh4.c @@ -88,16 +88,16 @@ static inline void flush_cache_4096(unsigned long start, unsigned long flags, exec_offset = 0; /* - * All types of SH-4 require PC to be uncached to operate on the I-cache. - * Some types of SH-4 require PC to be uncached to operate on the D-cache. + * All types of SH-4 require PC to be in P2 to operate on the I-cache. + * Some types of SH-4 require PC to be in P2 to operate on the D-cache. 
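+	 * (The P2 alias of an instruction sits 0x20000000 above its P1
+	 * address, which is what the exec_offset value below provides.)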
*/ if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || (start < CACHE_OC_ADDRESS_ARRAY)) - exec_offset = cached_to_uncached; + exec_offset = 0x20000000; local_irq_save(flags); __flush_cache_4096(start | SH_CACHE_ASSOC, - virt_to_phys(phys), exec_offset); + P1SEGADDR(phys), exec_offset); local_irq_restore(flags); } diff --git a/trunk/arch/sh/mm/cache-sh7705.c b/trunk/arch/sh/mm/cache-sh7705.c index f527fb70fce6..2601935eb589 100644 --- a/trunk/arch/sh/mm/cache-sh7705.c +++ b/trunk/arch/sh/mm/cache-sh7705.c @@ -141,7 +141,7 @@ static void sh7705_flush_dcache_page(void *arg) if (mapping && !mapping_mapped(mapping)) set_bit(PG_dcache_dirty, &page->flags); else - __flush_dcache_page(__pa(page_address(page))); + __flush_dcache_page(PHYSADDR(page_address(page))); } static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) diff --git a/trunk/arch/sh/mm/consistent.c b/trunk/arch/sh/mm/consistent.c index 9a8403d9344b..e098ec158ddb 100644 --- a/trunk/arch/sh/mm/consistent.c +++ b/trunk/arch/sh/mm/consistent.c @@ -85,7 +85,7 @@ EXPORT_SYMBOL(dma_free_coherent); void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { -#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB) +#ifdef CONFIG_CPU_SH5 void *p1addr = vaddr; #else void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); diff --git a/trunk/arch/sh/mm/init.c b/trunk/arch/sh/mm/init.c index c8af6c5fa586..8173e38afd38 100644 --- a/trunk/arch/sh/mm/init.c +++ b/trunk/arch/sh/mm/init.c @@ -323,12 +323,4 @@ int memory_add_physaddr_to_nid(u64 addr) } EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif - #endif /* CONFIG_MEMORY_HOTPLUG */ - -#ifdef CONFIG_PMB -int __in_29bit_mode(void) -{ - return !(ctrl_inl(PMB_PASCR) & PASCR_SE); -} -#endif /* CONFIG_PMB */ diff --git a/trunk/arch/sh/mm/pmb-fixed.c b/trunk/arch/sh/mm/pmb-fixed.c new file mode 100644 index 000000000000..43c8eac4d8a1 --- /dev/null +++ b/trunk/arch/sh/mm/pmb-fixed.c @@ -0,0 +1,45 @@ +/* + * arch/sh/mm/fixed_pmb.c + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
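+ *
+ * Fixes up the cache-related flags (PMB_C/PMB_WT) of the wired PMB
+ * entries programmed by the boot loader so that they match the
+ * kernel's cache configuration.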
+ */ +#include +#include +#include +#include +#include + +static int __uses_jump_to_uncached fixed_pmb_init(void) +{ + int i; + unsigned long addr, data; + + jump_to_uncached(); + + for (i = 0; i < PMB_ENTRY_MAX; i++) { + addr = PMB_DATA + (i << PMB_E_SHIFT); + data = ctrl_inl(addr); + if (!(data & PMB_V)) + continue; + + if (data & PMB_C) { +#if defined(CONFIG_CACHE_WRITETHROUGH) + data |= PMB_WT; +#elif defined(CONFIG_CACHE_WRITEBACK) + data &= ~PMB_WT; +#else + data &= ~(PMB_C | PMB_WT); +#endif + } + ctrl_outl(data, addr); + } + + back_to_cached(); + + return 0; +} +arch_initcall(fixed_pmb_init); diff --git a/trunk/arch/sh/mm/pmb.c b/trunk/arch/sh/mm/pmb.c index 280f6a166035..aade31102112 100644 --- a/trunk/arch/sh/mm/pmb.c +++ b/trunk/arch/sh/mm/pmb.c @@ -35,9 +35,29 @@ static void __pmb_unmap(struct pmb_entry *); -static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; +static struct kmem_cache *pmb_cache; static unsigned long pmb_map; +static struct pmb_entry pmb_init_map[] = { + /* vpn ppn flags (ub/sz/c/wt) */ + + /* P1 Section Mappings */ + { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, }, + { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, }, + { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, }, + { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, }, + { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, }, + { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, }, + + /* P2 Section Mappings */ + { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, }, + { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, + { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, +}; + static inline unsigned long mk_pmb_entry(unsigned int entry) { return (entry & PMB_E_MASK) << PMB_E_SHIFT; @@ -53,68 +73,81 @@ static inline unsigned long mk_pmb_data(unsigned int entry) return mk_pmb_entry(entry) | PMB_DATA; } -static int pmb_alloc_entry(void) +static DEFINE_SPINLOCK(pmb_list_lock); +static struct pmb_entry *pmb_list; + +static inline void pmb_list_add(struct pmb_entry *pmbe) { - unsigned int pos; + struct pmb_entry **p, *tmp; -repeat: - pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + p = &pmb_list; + while ((tmp = *p) != NULL) + p = &tmp->next; - if (unlikely(pos > NR_PMB_ENTRIES)) - return -ENOSPC; + pmbe->next = tmp; + *p = pmbe; +} - if (test_and_set_bit(pos, &pmb_map)) - goto repeat; +static inline void pmb_list_del(struct pmb_entry *pmbe) +{ + struct pmb_entry **p, *tmp; - return pos; + for (p = &pmb_list; (tmp = *p); p = &tmp->next) + if (tmp == pmbe) { + *p = tmp->next; + return; + } } -static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, - unsigned long flags, int entry) +struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, + unsigned long flags) { struct pmb_entry *pmbe; - int pos; - - if (entry == PMB_NO_ENTRY) { - pos = pmb_alloc_entry(); - if (pos < 0) - return ERR_PTR(pos); - } else { - if (test_bit(entry, &pmb_map)) - return ERR_PTR(-ENOSPC); - pos = entry; - } - pmbe = &pmb_entry_list[pos]; + pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); if (!pmbe) return ERR_PTR(-ENOMEM); pmbe->vpn = vpn; pmbe->ppn = ppn; pmbe->flags = flags; - pmbe->entry = pos; + + spin_lock_irq(&pmb_list_lock); + pmb_list_add(pmbe); + spin_unlock_irq(&pmb_list_lock); return pmbe; } -static void pmb_free(struct pmb_entry *pmbe) +void pmb_free(struct pmb_entry *pmbe) { - int pos = pmbe->entry; + 
spin_lock_irq(&pmb_list_lock); + pmb_list_del(pmbe); + spin_unlock_irq(&pmb_list_lock); - pmbe->vpn = 0; - pmbe->ppn = 0; - pmbe->flags = 0; - pmbe->entry = 0; - - clear_bit(pos, &pmb_map); + kmem_cache_free(pmb_cache, pmbe); } /* * Must be in P2 for __set_pmb_entry() */ -static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int pos) +int __set_pmb_entry(unsigned long vpn, unsigned long ppn, + unsigned long flags, int *entry) { + unsigned int pos = *entry; + + if (unlikely(pos == PMB_NO_ENTRY)) + pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + +repeat: + if (unlikely(pos > NR_PMB_ENTRIES)) + return -ENOSPC; + + if (test_and_set_bit(pos, &pmb_map)) { + pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + goto repeat; + } + ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); #ifdef CONFIG_CACHE_WRITETHROUGH @@ -128,21 +161,35 @@ static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, #endif ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); + + *entry = pos; + + return 0; } -static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) +int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) { + int ret; + jump_to_uncached(); - __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); + ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry); back_to_cached(); + + return ret; } -static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) +void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) { unsigned int entry = pmbe->entry; unsigned long addr; - if (unlikely(entry >= NR_PMB_ENTRIES)) + /* + * Don't allow clearing of wired init entries, P1 or P2 access + * without a corresponding mapping in the PMB will lead to reset + * by the TLB. 
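+	 * (Those wired entries occupy the first ARRAY_SIZE(pmb_init_map)
+	 * slots, which is what the lower bound below checks.)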
+ */ + if (unlikely(entry < ARRAY_SIZE(pmb_init_map) || + entry >= NR_PMB_ENTRIES)) return; jump_to_uncached(); @@ -155,6 +202,8 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); back_to_cached(); + + clear_bit(entry, &pmb_map); } @@ -190,17 +239,23 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, again: for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { + int ret; + if (size < pmb_sizes[i].size) continue; - pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, - PMB_NO_ENTRY); + pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag); if (IS_ERR(pmbe)) { err = PTR_ERR(pmbe); goto out; } - set_pmb_entry(pmbe); + ret = set_pmb_entry(pmbe); + if (ret != 0) { + pmb_free(pmbe); + err = -EBUSY; + goto out; + } phys += pmb_sizes[i].size; vaddr += pmb_sizes[i].size; @@ -237,16 +292,11 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, void pmb_unmap(unsigned long addr) { - struct pmb_entry *pmbe = NULL; - int i; + struct pmb_entry **p, *pmbe; - for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { - if (test_bit(i, &pmb_map)) { - pmbe = &pmb_entry_list[i]; - if (pmbe->vpn == addr) - break; - } - } + for (p = &pmb_list; (pmbe = *p); p = &pmbe->next) + if (pmbe->vpn == addr) + break; if (unlikely(!pmbe)) return; @@ -256,22 +306,13 @@ void pmb_unmap(unsigned long addr) static void __pmb_unmap(struct pmb_entry *pmbe) { - BUG_ON(!test_bit(pmbe->entry, &pmb_map)); + WARN_ON(!test_bit(pmbe->entry, &pmb_map)); do { struct pmb_entry *pmblink = pmbe; - /* - * We may be called before this pmb_entry has been - * entered into the PMB table via set_pmb_entry(), but - * that's OK because we've allocated a unique slot for - * this entry in pmb_alloc() (even if we haven't filled - * it yet). - * - * Therefore, calling clear_pmb_entry() is safe as no - * other mapping can be using that slot. - */ - clear_pmb_entry(pmbe); + if (pmbe->entry != PMB_NO_ENTRY) + clear_pmb_entry(pmbe); pmbe = pmblink->link; @@ -279,34 +320,42 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } while (pmbe); } -#ifdef CONFIG_PMB -int __uses_jump_to_uncached pmb_init(void) +static void pmb_cache_ctor(void *pmb) { - unsigned int i; - long size, ret; + struct pmb_entry *pmbe = pmb; + + memset(pmb, 0, sizeof(struct pmb_entry)); + + pmbe->entry = PMB_NO_ENTRY; +} + +static int __uses_jump_to_uncached pmb_init(void) +{ + unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); + unsigned int entry, i; + + BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); + + pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, + SLAB_PANIC, pmb_cache_ctor); jump_to_uncached(); /* - * Insert PMB entries for the P1 and P2 areas so that, after - * we've switched the MMU to 32-bit mode, the semantics of P1 - * and P2 are the same as in 29-bit mode, e.g. - * - * P1 - provides a cached window onto physical memory - * P2 - provides an uncached window onto physical memory + * Ordering is important, P2 must be mapped in the PMB before we + * can set PMB.SE, and P1 must be mapped before we jump back to + * P1 space. 
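+ * (pmb_init_map above is laid out to honour that ordering: entries
+ * 0-5 map P1, entries 6-11 map P2, and all of them are programmed
+ * before PMB.SE gets set below.)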
*/ - size = __MEMORY_START + __MEMORY_SIZE; + for (entry = 0; entry < nr_entries; entry++) { + struct pmb_entry *pmbe = pmb_init_map + entry; - ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); - BUG_ON(ret != size); - - ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); - BUG_ON(ret != size); + __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry); + } ctrl_outl(0, PMB_IRMCR); /* PMB.SE and UB[7] */ - ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); + ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR); /* Flush out the TLB */ i = ctrl_inl(MMUCR); @@ -317,53 +366,7 @@ int __uses_jump_to_uncached pmb_init(void) return 0; } -#else -int __uses_jump_to_uncached pmb_init(void) -{ - int i; - unsigned long addr, data; - - jump_to_uncached(); - - for (i = 0; i < PMB_ENTRY_MAX; i++) { - struct pmb_entry *pmbe; - unsigned long vpn, ppn, flags; - - addr = PMB_DATA + (i << PMB_E_SHIFT); - data = ctrl_inl(addr); - if (!(data & PMB_V)) - continue; - - if (data & PMB_C) { -#if defined(CONFIG_CACHE_WRITETHROUGH) - data |= PMB_WT; -#elif defined(CONFIG_CACHE_WRITEBACK) - data &= ~PMB_WT; -#else - data &= ~(PMB_C | PMB_WT); -#endif - } - ctrl_outl(data, addr); - - ppn = data & PMB_PFN_MASK; - - flags = data & (PMB_C | PMB_WT | PMB_UB); - flags |= data & PMB_SZ_MASK; - - addr = PMB_ADDR + (i << PMB_E_SHIFT); - data = ctrl_inl(addr); - - vpn = data & PMB_PFN_MASK; - - pmbe = pmb_alloc(vpn, ppn, flags, i); - WARN_ON(IS_ERR(pmbe)); - } - - back_to_cached(); - - return 0; -} -#endif /* CONFIG_PMB */ +arch_initcall(pmb_init); static int pmb_seq_show(struct seq_file *file, void *iter) { @@ -431,18 +434,15 @@ postcore_initcall(pmb_debugfs_init); static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) { static pm_message_t prev_state; - int i; /* Restore the PMB after a resume from hibernation */ if (state.event == PM_EVENT_ON && prev_state.event == PM_EVENT_FREEZE) { struct pmb_entry *pmbe; - for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { - if (test_bit(i, &pmb_map)) { - pmbe = &pmb_entry_list[i]; - set_pmb_entry(pmbe); - } - } + spin_lock_irq(&pmb_list_lock); + for (pmbe = pmb_list; pmbe; pmbe = pmbe->next) + set_pmb_entry(pmbe); + spin_unlock_irq(&pmb_list_lock); } prev_state = state; return 0;
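
To close, here is a stand-alone sketch, again ordinary user-space C rather than patch content, of the two MMIO writes that __set_pmb_entry() issues per slot. PMB_ADDR, PMB_DATA, PMB_E_MASK, PMB_E_SHIFT and PMB_SZ_64M are taken from the asm/mmu.h hunk above; the PMB_V and PMB_C bit values are assumptions, since the patch does not show them. The example mirrors the first pmb_init_map entry, the cached 64MB P1 mapping of physical address 0.

#include <stdio.h>

#define PMB_ADDR	0xf6100000UL
#define PMB_DATA	0xf7100000UL
#define PMB_E_MASK	0x0000000fUL
#define PMB_E_SHIFT	8
#define PMB_SZ_64M	0x00000010UL
#define PMB_V		0x00000100UL	/* assumed: entry valid bit */
#define PMB_C		0x00000008UL	/* assumed: cacheable bit */

/* same computation as mk_pmb_entry() in pmb.c above */
static unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

int main(void)
{
	unsigned int pos = 0;			/* PMB slot 0 */
	unsigned long vpn = 0x80000000UL;	/* P1 virtual base */
	unsigned long ppn = 0x00000000UL;	/* physical base */
	unsigned long flags = PMB_SZ_64M | PMB_C;

	/* __set_pmb_entry() does one write to the address array and one
	 * to the data array for the chosen slot: */
	printf("PMB_ADDR[%u] @ 0x%08lx <- 0x%08lx\n", pos,
	       mk_pmb_entry(pos) | PMB_ADDR, vpn | PMB_V);
	printf("PMB_DATA[%u] @ 0x%08lx <- 0x%08lx\n", pos,
	       mk_pmb_entry(pos) | PMB_DATA, ppn | flags | PMB_V);
	return 0;
}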