
Commit

Merge branch 'sh/pmb-dynamic'
Paul Mundt committed Feb 18, 2010
2 parents 838a4a9 + d01447b commit 77f36fc
Showing 20 changed files with 572 additions and 283 deletions.
2 changes: 1 addition & 1 deletion arch/sh/boot/compressed/misc.c
@@ -117,7 +117,7 @@ void decompress_kernel(void)
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY)
#if defined(CONFIG_29BIT)
output_addr |= P2SEG;
#endif
#endif
22 changes: 22 additions & 0 deletions arch/sh/include/asm/io.h
@@ -133,6 +133,28 @@ static inline void ctrl_delay(void)
__raw_readw(generic_io_base);
}

#define __BUILD_UNCACHED_IO(bwlq, type) \
static inline type read##bwlq##_uncached(unsigned long addr) \
{ \
type ret; \
jump_to_uncached(); \
ret = __raw_read##bwlq(addr); \
back_to_cached(); \
return ret; \
} \
\
static inline void write##bwlq##_uncached(type v, unsigned long addr) \
{ \
jump_to_uncached(); \
__raw_write##bwlq(v, addr); \
back_to_cached(); \
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)

#define __BUILD_MEMORY_STRING(bwlq, type) \
\
static inline void __raw_writes##bwlq(volatile void __iomem *mem, \
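The new accessors wrap a raw MMIO access in a jump_to_uncached()/back_to_cached() pair, so callers that need to bypass the cache never touch those primitives directly. A minimal sketch of how a caller might use the generated readl_uncached()/writel_uncached() helpers; the register address below is hypothetical and purely for illustration:

#include <linux/types.h>
#include <asm/io.h>

/* Hypothetical control register in a PMB-mapped region (not part of this commit). */
#define EXAMPLE_CTRL_REG	0xfe400000UL

static void example_toggle_ctrl_bit(void)
{
	u32 val;

	/* Read through the uncached alias so the device's current value is seen. */
	val = readl_uncached(EXAMPLE_CTRL_REG);

	/* Write it back with bit 0 set, again bypassing the cache. */
	writel_uncached(val | 0x1, EXAMPLE_CTRL_REG);
}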
40 changes: 10 additions & 30 deletions arch/sh/include/asm/mmu.h
@@ -11,7 +11,9 @@

#define PMB_ADDR 0xf6100000
#define PMB_DATA 0xf7100000
#define PMB_ENTRY_MAX 16

#define NR_PMB_ENTRIES 16

#define PMB_E_MASK 0x0000000f
#define PMB_E_SHIFT 8

@@ -25,13 +27,15 @@
#define PMB_C 0x00000008
#define PMB_WT 0x00000001
#define PMB_UB 0x00000200
#define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB)
#define PMB_V 0x00000100

#define PMB_NO_ENTRY (-1)

#ifndef __ASSEMBLY__
#include <linux/errno.h>
#include <linux/threads.h>
#include <asm/page.h>

/* Default "unsigned long" context */
typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -49,46 +53,22 @@ typedef struct {
#endif
} mm_context_t;

struct pmb_entry;

struct pmb_entry {
unsigned long vpn;
unsigned long ppn;
unsigned long flags;

/*
* 0 .. NR_PMB_ENTRIES for specific entry selection, or
* PMB_NO_ENTRY to search for a free one
*/
int entry;

struct pmb_entry *next;
/* Adjacent entry link for contiguous multi-entry mappings */
struct pmb_entry *link;
};

#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, unsigned long flags);
unsigned long size, pgprot_t prot);
void pmb_unmap(unsigned long addr);
int pmb_init(void);
void pmb_init(void);
bool __in_29bit_mode(void);
#else
static inline long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, unsigned long flags)
unsigned long size, pgprot_t prot)
{
return -EINVAL;
}

static inline void pmb_unmap(unsigned long addr)
{
}

static inline int pmb_init(void)
{
return -ENODEV;
}
#define pmb_unmap(addr) do { } while (0)
#define pmb_init(addr) do { } while (0)

#ifdef CONFIG_29BIT
#define __in_29bit_mode() (1)
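Callers now pass a pgprot_t instead of raw PMB flag bits, and pmb_init() no longer returns a status. A minimal sketch of establishing and tearing down a mapping under the new prototype; the addresses and size are made up, and the assumption that a negative return signals failure follows the !CONFIG_PMB stub above:

#include <asm/mmu.h>
#include <asm/pgtable.h>

static long example_pmb_window(void)
{
	/* Hypothetical 16MB window: addresses chosen for illustration only. */
	unsigned long virt = 0xb0000000;
	unsigned long phys = 0x48000000;
	long ret;

	/* Ask the PMB code for an uncached mapping covering the range. */
	ret = pmb_remap(virt, phys, 16 << 20, PAGE_KERNEL_NOCACHE);
	if (ret < 0)
		return ret;

	/* ... access the window through 'virt' ... */

	pmb_unmap(virt);
	return 0;
}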
17 changes: 10 additions & 7 deletions arch/sh/include/asm/page.h
@@ -45,6 +45,7 @@
#endif

#ifndef __ASSEMBLY__
#include <asm/uncached.h>

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
@@ -56,7 +57,6 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
return (addr1 ^ addr2) & shm_align_mask;
}


#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);

@@ -127,19 +127,22 @@ typedef struct page *pgtable_t;
* is not visible (it is part of the PMB mapping) and so needs to be
* added or subtracted as required.
*/
#if defined(CONFIG_PMB_LEGACY)
/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET))
#elif defined(CONFIG_32BIT)
#ifdef CONFIG_PMB
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif

#ifdef CONFIG_UNCACHED_MAPPING
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
#else
#define UNCAC_ADDR(addr) ((addr))
#define CAC_ADDR(addr) ((addr))
#endif

#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)

11 changes: 2 additions & 9 deletions arch/sh/include/asm/ptrace.h
@@ -139,15 +139,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);

#ifdef CONFIG_UNCACHED_MAPPING
/*
* If PC points in to the uncached mapping, fix it up and hand
* back the cached equivalent.
*/
if ((pc >= (memory_start + cached_to_uncached)) &&
(pc < (memory_start + cached_to_uncached + uncached_size)))
pc -= cached_to_uncached;
#endif
if (virt_addr_uncached(pc))
return CAC_ADDR(pc);

return pc;
}
18 changes: 18 additions & 0 deletions arch/sh/include/asm/uncached.h
@@ -0,0 +1,18 @@
#ifndef __ASM_SH_UNCACHED_H
#define __ASM_SH_UNCACHED_H

#include <linux/bug.h>

#ifdef CONFIG_UNCACHED_MAPPING
extern unsigned long uncached_start, uncached_end;

extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
extern void uncached_resize(unsigned long size);
#else
#define virt_addr_uncached(kaddr) (0)
#define uncached_init() do { } while (0)
#define uncached_resize(size) BUG()
#endif

#endif /* __ASM_SH_UNCACHED_H */
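Together with the UNCAC_ADDR()/CAC_ADDR() helpers added to page.h, this header lets generic code ask whether a kernel address falls inside the uncached alias and translate it back, which is exactly what the simplified profile_pc() above does. A small hypothetical helper showing the same pattern:

#include <asm/page.h>
#include <asm/uncached.h>

/*
 * Normalize a kernel address to its cached form before comparing it
 * against cached symbols (hypothetical helper mirroring profile_pc()).
 */
static unsigned long example_cached_alias(unsigned long kaddr)
{
	if (virt_addr_uncached(kaddr))
		return CAC_ADDR(kaddr);

	return kaddr;
}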
3 changes: 2 additions & 1 deletion arch/sh/include/cpu-sh4/cpu/sq.h
@@ -12,6 +12,7 @@
#define __ASM_CPU_SH4_SQ_H

#include <asm/addrspace.h>
#include <asm/page.h>

/*
* Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
@@ -28,7 +29,7 @@

/* arch/sh/kernel/cpu/sh4/sq.c */
unsigned long sq_remap(unsigned long phys, unsigned int size,
const char *name, unsigned long flags);
const char *name, pgprot_t prot);
void sq_unmap(unsigned long vaddr);
void sq_flush_range(unsigned long start, unsigned int len);

21 changes: 17 additions & 4 deletions arch/sh/kernel/cpu/init.c
@@ -24,6 +24,7 @@
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>

#ifdef CONFIG_SH_FPU
#define cpu_has_fpu 1
@@ -342,9 +343,21 @@ asmlinkage void __init sh_cpu_init(void)
speculative_execution_init();
expmask_init();

/*
* Boot processor to setup the FP and extended state context info.
*/
if (raw_smp_processor_id() == 0)
/* Do the rest of the boot processor setup */
if (raw_smp_processor_id() == 0) {
/* Save off the BIOS VBR, if there is one */
sh_bios_vbr_init();

/*
* Setup VBR for boot CPU. Secondary CPUs do this through
* start_secondary().
*/
per_cpu_trap_init();

/*
* Boot processor to setup the FP and extended state
* context info.
*/
init_thread_xstate();
}
}
13 changes: 6 additions & 7 deletions arch/sh/kernel/cpu/sh4/sq.c
@@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, unsigned long flags)
static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
struct vm_struct *vma;
@@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)

if (ioremap_page_range((unsigned long)vma->addr,
(unsigned long)vma->addr + map->size,
vma->phys_addr, __pgprot(flags))) {
vma->phys_addr, prot)) {
vunmap(vma->addr);
return -EAGAIN;
}
@@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
* @phys: Physical address of mapping.
* @size: Length of mapping.
* @name: User invoking mapping.
* @flags: Protection flags.
* @prot: Protection bits.
*
* Remaps the physical address @phys through the next available store queue
* address of @size length. @name is logged at boot time as well as through
* the sysfs interface.
*/
unsigned long sq_remap(unsigned long phys, unsigned int size,
const char *name, unsigned long flags)
const char *name, pgprot_t prot)
{
struct sq_mapping *map;
unsigned long end;
@@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,

map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
ret = __sq_remap(map, prot);
if (unlikely(ret != 0))
goto out;

@@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
return -EIO;

if (likely(len)) {
int ret = sq_remap(base, len, "Userspace",
pgprot_val(PAGE_SHARED));
int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
if (ret < 0)
return ret;
} else
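The mapping_store() hunk above already shows the sysfs path passing PAGE_SHARED straight through; for a driver-side caller the pattern looks like the hedged sketch below, where the physical range, mapping name, and error handling are illustrative assumptions rather than code from this commit:

#include <linux/err.h>
#include <asm/pgtable.h>
#include <cpu/sq.h>

static unsigned long example_sq_map(void)
{
	unsigned long sq_addr;

	/* Map a hypothetical 1MB region at physical 0x10000000 through the store queues. */
	sq_addr = sq_remap(0x10000000, 0x100000, "example-fb",
			   PAGE_KERNEL_NOCACHE);
	if (IS_ERR_VALUE(sq_addr))
		return 0;

	return sq_addr;	/* store queue virtual address */
}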