
Commit

---
r: 186489
b: refs/heads/master
c: 90e7d64
h: refs/heads/master
i:
  186487: 5a8544d
v: v3
Paul Mundt committed Mar 2, 2010
1 parent 6142c4c commit 44e071b
Showing 6 changed files with 224 additions and 169 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 94316cdadb0067ba6d1f08b9a6f84fe755bdaa38
refs/heads/master: 90e7d649d86f21d478dc134f74c88e19dd472393
23 changes: 10 additions & 13 deletions trunk/arch/sh/include/asm/io.h
@@ -291,21 +291,21 @@ unsigned long long poke_real_address_q(unsigned long long addr,
* doesn't exist, so everything must go through page tables.
*/
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
unsigned long last_addr = offset + size - 1;
phys_addr_t last_addr = offset + size - 1;

/*
* For P1 and P2 space this is trivial, as everything is already
@@ -329,7 +329,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
}

static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
void __iomem *ret;

@@ -349,35 +349,32 @@ __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *
ioremap(unsigned long offset, unsigned long size)
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(unsigned long offset, unsigned long size)
ioremap_cache(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL);
}

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
unsigned long, pgprot_t);
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
unsigned long size, pgprot_t prot)
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
BUG();
return NULL;
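
Note on the io.h hunks above: every ioremap() variant now takes a phys_addr_t and funnels through __ioremap_mode()/__ioremap_caller(), passing __builtin_return_address(0) so the mapping is tagged with its call site. The following is a minimal user-space sketch of that caller-tracking wrapper pattern; the types and the remap()/remap_caller() names are stand-ins for illustration, not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel types used in the diff. */
typedef uint64_t phys_addr_t;
typedef struct { unsigned long val; } pgprot_t;

/* Worker that also learns which function asked for the mapping. */
static void *remap_caller(phys_addr_t offset, unsigned long size,
                          pgprot_t prot, void *caller)
{
        (void)prot;
        printf("map %#llx (+%lu bytes), requested from %p\n",
               (unsigned long long)offset, size, caller);
        return (void *)(uintptr_t)offset;   /* placeholder "mapping" */
}

/* Thin wrapper: forwards the request and records a return address for it. */
static inline void *remap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
        return remap_caller(offset, size, prot, __builtin_return_address(0));
}

int main(void)
{
        pgprot_t prot = { 0 };
        remap(0x1f000000ULL, 4096, prot);
        return 0;
}

Running it prints the address being mapped together with the call-site pointer recorded for the request, analogous to the caller argument __ioremap_caller() receives.
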
31 changes: 24 additions & 7 deletions trunk/arch/sh/include/asm/mmu.h
@@ -55,19 +55,29 @@ typedef struct {

#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, pgprot_t prot);
void pmb_unmap(unsigned long addr);
void pmb_init(void);
bool __in_29bit_mode(void);

void pmb_init(void);
int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
unsigned long size, pgprot_t prot);
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
pgprot_t prot, void *caller);
int pmb_unmap(void __iomem *addr);

#else
static inline long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, pgprot_t prot)

static inline void __iomem *
pmb_remap_caller(phys_addr_t phys, unsigned long size,
pgprot_t prot, void *caller)
{
return NULL;
}

static inline int pmb_unmap(void __iomem *addr)
{
return -EINVAL;
}

#define pmb_unmap(addr) do { } while (0)
#define pmb_init(addr) do { } while (0)

#ifdef CONFIG_29BIT
@@ -77,6 +87,13 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
#endif

#endif /* CONFIG_PMB */

static inline void __iomem *
pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
{
return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
}

#endif /* __ASSEMBLY__ */

#endif /* __MMU_H */
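
The mmu.h change above replaces the old pmb_remap()/pmb_unmap() prototypes with pmb_remap_caller() plus an int-returning pmb_unmap(), and the !CONFIG_PMB stubs now return NULL and -EINVAL rather than being empty macros, so callers can simply test the result and fall back without an #ifdef. Below is a small stand-alone model of that optional-backend pattern; pmb_map()/pmb_release() and the HAVE_PMB switch are hypothetical names for the sketch, not the kernel interfaces.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Build with -DHAVE_PMB to model CONFIG_PMB=y; otherwise the stubs apply. */
#ifdef HAVE_PMB
static void *pmb_map(unsigned long long phys, unsigned long size)
{
        (void)phys;
        return malloc(size ? size : 1);   /* pretend the hardware mapper succeeded */
}
static int pmb_release(void *addr)
{
        free(addr);
        return 0;
}
#else
/* Stubs mirror the header's fallbacks: NULL / -EINVAL, no #ifdef at call sites. */
static inline void *pmb_map(unsigned long long phys, unsigned long size)
{
        (void)phys; (void)size;
        return NULL;
}
static inline int pmb_release(void *addr)
{
        (void)addr;
        return -EINVAL;
}
#endif

int main(void)
{
        void *m = pmb_map(0x1f000000ULL, 4096);
        if (m)
                printf("mapped through the PMB backend\n");
        else
                printf("PMB unavailable, fall back to page tables\n");
        if (pmb_release(m) != 0)
                printf("pmb_release: nothing to undo (-EINVAL)\n");
        return 0;
}

The header applies the same idea one level up as well: pmb_remap() itself is now a single static inline defined outside the #ifdef, built on pmb_remap_caller() and __builtin_return_address(0).
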
70 changes: 22 additions & 48 deletions trunk/arch/sh/mm/ioremap.c
@@ -34,30 +34,39 @@
* caller shouldn't need to know that small detail.
*/
void __iomem * __init_refok
__ioremap_caller(unsigned long phys_addr, unsigned long size,
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
pgprot_t pgprot, void *caller)
{
struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr;
void __iomem *mapped;

/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;

/*
* If we can't yet use the regular approach, go the fixmap route.
*/
if (!mem_init_done)
return ioremap_fixed(phys_addr, size, pgprot);

/*
* First try to remap through the PMB.
* PMB entries are all pre-faulted.
*/
mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
if (mapped && !IS_ERR(mapped))
return mapped;

/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;

/*
* If we can't yet use the regular approach, go the fixmap route.
*/
if (!mem_init_done)
return ioremap_fixed(phys_addr, offset, size, pgprot);

/*
* Ok, go for it..
*/
@@ -67,33 +76,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
area->phys_addr = phys_addr;
orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_PMB
/*
* First try to remap through the PMB once a valid VMA has been
* established. Smaller allocations (or the rest of the size
* remaining after a PMB mapping due to the size not being
* perfectly aligned on a PMB size boundary) are then mapped
* through the UTLB using conventional page tables.
*
* PMB entries are all pre-faulted.
*/
if (unlikely(phys_addr >= P1SEG)) {
unsigned long mapped;

mapped = pmb_remap(addr, phys_addr, size, pgprot);
if (likely(mapped)) {
addr += mapped;
phys_addr += mapped;
size -= mapped;
}
if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
vunmap((void *)orig_addr);
return NULL;
}
#endif

if (likely(size))
if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
vunmap((void *)orig_addr);
return NULL;
}

return (void __iomem *)(offset + (char *)orig_addr);
}
@@ -133,23 +119,11 @@ void __iounmap(void __iomem *addr)
if (iounmap_fixed(addr) == 0)
return;

#ifdef CONFIG_PMB
/*
* Purge any PMB entries that may have been established for this
* mapping, then proceed with conventional VMA teardown.
*
* XXX: Note that due to the way that remove_vm_area() does
* matching of the resultant VMA, we aren't able to fast-forward
* the address past the PMB space until the end of the VMA where
* the page tables reside. As such, unmap_vm_area() will be
* forced to linearly scan over the area until it finds the page
* tables where PTEs that need to be unmapped actually reside,
* which is far from optimal. Perhaps we need to use a separate
* VMA for the PMB mappings?
* -- PFM.
* If the PMB handled it, there's nothing else to do.
*/
pmb_unmap(vaddr);
#endif
if (pmb_unmap(addr) == 0)
return;

p = remove_vm_area((void *)(vaddr & PAGE_MASK));
if (!p) {
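
The rewritten __ioremap_caller() above also changes the order of operations: reject zero-size or wrapping requests up front, take the fixmap route while mem_init_done is still false, then let the PMB try to satisfy the mapping outright, and only fall back to a conventional page-table mapping if it cannot. Here is a simplified user-space model of that ordering; map_fixed(), map_pmb() and map_page_tables() are stubs standing in for ioremap_fixed(), pmb_remap_caller() and ioremap_page_range(), and the address threshold in map_pmb() is an arbitrary demo rule.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

static bool mem_init_done = true;            /* assume late boot for the demo */

static void *map_fixed(phys_addr_t p)        { (void)p; return NULL; }
static void *map_pmb(phys_addr_t p)          { return p >= 0x40000000ULL ? (void *)0x1 : NULL; }
static void *map_page_tables(phys_addr_t p)  { (void)p; return (void *)0x2; }

/* Mirrors the decision order of the reworked __ioremap_caller(). */
static void *remap(phys_addr_t phys, unsigned long size)
{
        phys_addr_t last = phys + size - 1;
        void *mapped;

        if (!size || last < phys)            /* zero size or wraparound */
                return NULL;

        if (!mem_init_done)                  /* too early: fixmap route */
                return map_fixed(phys);

        mapped = map_pmb(phys);              /* pre-faulted PMB entry, if available */
        if (mapped)
                return mapped;

        return map_page_tables(phys);        /* conventional page-table mapping */
}

int main(void)
{
        printf("low address  -> backend %p\n", remap(0x1c000000ULL, 4096));
        printf("high address -> backend %p\n", remap(0x48000000ULL, 4096));
        return 0;
}

Compared with the removed CONFIG_PMB block, the PMB no longer splits a request and leaves a remainder for the page tables; it either maps the whole region or the page-table path handles all of it.
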
11 changes: 9 additions & 2 deletions trunk/arch/sh/mm/ioremap_fixed.c
@@ -45,14 +45,21 @@ void __init ioremap_fixed_init(void)
}

void __init __iomem *
ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
unsigned long size, pgprot_t prot)
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
enum fixed_addresses idx0, idx;
struct ioremap_map *map;
unsigned int nrpages;
unsigned long offset;
int i, slot;

/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(phys_addr + size) - phys_addr;

slot = -1;
for (i = 0; i < FIX_N_IOREMAPS; i++) {
map = &ioremap_maps[i];
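
In the ioremap_fixed.c hunk above, ioremap_fixed() now derives the sub-page offset and the page-aligned base and length itself; the separate offset parameter is gone from its signature. The arithmetic is easy to check in isolation; the snippet below assumes 4 KiB pages and an arbitrary example address.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        uint64_t phys_addr = 0x1f000127ULL;   /* unaligned request */
        uint64_t size = 100;
        uint64_t offset;

        /* Same steps as the new ioremap_fixed(): split off the sub-page
         * offset, align the base down and the length up to whole pages. */
        offset     = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size       = PAGE_ALIGN(phys_addr + size) - phys_addr;

        printf("offset=%#llx base=%#llx size=%llu\n",
               (unsigned long long)offset,
               (unsigned long long)phys_addr,
               (unsigned long long)size);
        return 0;
}

For this input the result is offset=0x127, base=0x1f000000, size=4096; the sub-page offset is added back onto the virtual mapping at return time, the same offset-add-back visible at the end of __ioremap_caller() above.
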