[ARM] mm 5: Use mem_types table in ioremap
We really want to be using the memory type table in ioremap, so we
only have to do the CPU type fixups in one place.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Russell King authored and committed on Apr 21, 2007
1 parent: 24e6c69 · commit: b29e9f5
Showing 3 changed files with 42 additions and 46 deletions.
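
In outline, the change replaces raw flag plumbing through the ioremap call chain with a lookup into a shared table of memory types. The user-space sketch below mirrors that pattern so it can be compiled and run standalone: the struct fields and the shape of get_mem_type() come from the patch, while the MT_DEVICE index, the protection values, and the main() driver are illustrative stand-ins, not kernel values.

/*
 * Compilable user-space sketch of the table-plus-accessor pattern
 * this commit adopts.  Values are illustrative only.
 */
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

struct mem_type {
        unsigned int prot_pte;
        unsigned int prot_l1;
        unsigned int prot_sect;
        unsigned int domain;
};

enum { MT_DEVICE };                     /* the kernel defines several more */

static struct mem_type mem_types[] = {
        [MT_DEVICE] = { .prot_pte = 0x023, .prot_sect = 0x402, .domain = 2 },
};

/* Bounds-checked accessor, shaped like the one added to mmu.c below. */
static const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}

int main(void)
{
        /* Copy the entry and OR in per-mapping flags, as __ioremap_pfn()
         * now does; the shared table itself is left untouched. */
        struct mem_type t = *get_mem_type(MT_DEVICE);
        t.prot_pte |= 0x008;            /* stand-in for an L_PTE_* flag */
        t.prot_sect |= 0x008;           /* stand-in for a PMD_* flag */

        printf("pte %#x, sect %#x\n", t.prot_pte, t.prot_sect);
        return 0;
}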
arch/arm/mm/ioremap.c: 27 additions & 38 deletions
@@ -32,15 +32,19 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
+#include <asm/mach/map.h>
 #include "mm.h"
 
 /*
  * Used by ioremap() and iounmap() code to mark (super)section-mapped
  * I/O regions in vm_struct->flags field.
  */
 #define VM_ARM_SECTION_MAPPING	0x80000000
 
 static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
-			  unsigned long phys_addr, pgprot_t prot)
+			  unsigned long phys_addr, const struct mem_type *type)
 {
+	pgprot_t prot = __pgprot(type->prot_pte);
 	pte_t *pte;
 
 	pte = pte_alloc_kernel(pmd, addr);
@@ -63,7 +67,7 @@ static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 				 unsigned long end, unsigned long phys_addr,
-				 pgprot_t prot)
+				 const struct mem_type *type)
 {
 	unsigned long next;
 	pmd_t *pmd;
@@ -75,7 +79,7 @@ static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pmd_addr_end(addr, end);
-		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
+		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
 		if (ret)
 			return ret;
 		phys_addr += next - addr;
@@ -84,21 +88,19 @@ static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 }
 
 static int remap_area_pages(unsigned long start, unsigned long pfn,
-			    unsigned long size, unsigned long flags)
+			    size_t size, const struct mem_type *type)
 {
 	unsigned long addr = start;
 	unsigned long next, end = start + size;
 	unsigned long phys_addr = __pfn_to_phys(pfn);
-	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE | flags);
 	pgd_t *pgd;
 	int err = 0;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
+		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
 		if (err)
 			break;
 		phys_addr += next - addr;
@@ -178,9 +180,9 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 
 static int
 remap_area_sections(unsigned long virt, unsigned long pfn,
-		    unsigned long size, unsigned long flags)
+		    size_t size, const struct mem_type *type)
 {
-	unsigned long prot, addr = virt, end = virt + size;
+	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
 
 	/*
@@ -189,23 +191,13 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 	 */
 	unmap_area_sections(virt, size);
 
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
-	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
 	pgd = pgd_offset_k(addr);
 	do {
 		pmd_t *pmd = pmd_offset(pgd, addr);
 
-		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
+		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
-		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
+		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
 		flush_pmd_entry(pmd);
 
@@ -218,9 +210,9 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 
 static int
 remap_area_supersections(unsigned long virt, unsigned long pfn,
-			 unsigned long size, unsigned long flags)
+			 size_t size, const struct mem_type *type)
 {
-	unsigned long prot, addr = virt, end = virt + size;
+	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
 
 	/*
@@ -229,22 +221,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 	 */
 	unmap_area_sections(virt, size);
 
-	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
-	       PMD_DOMAIN(DOMAIN_IO) |
-	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
 	pgd = pgd_offset_k(virt);
 	do {
 		unsigned long super_pmd_val, i;
 
-		super_pmd_val = __pfn_to_phys(pfn) | prot;
+		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
+				PMD_SECT_SUPER;
 		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
 
 		for (i = 0; i < 8; i++) {
@@ -282,6 +264,8 @@ void __iomem *
 __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	      unsigned long flags)
 {
+	const struct mem_type *type;
+	struct mem_type t;
 	int err;
 	unsigned long addr;
 	struct vm_struct * area;
@@ -292,6 +276,11 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
+	t = *get_mem_type(MT_DEVICE);
+	t.prot_sect |= flags;
+	t.prot_pte |= flags;
+	type = &t;
+
 	size = PAGE_ALIGN(size);
 
 	area = get_vm_area(size, VM_IOREMAP);
@@ -305,13 +294,13 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	    cpu_is_xsc3()) && pfn >= 0x100000 &&
 	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
-		err = remap_area_supersections(addr, pfn, size, flags);
+		err = remap_area_supersections(addr, pfn, size, type);
 	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
-		err = remap_area_sections(addr, pfn, size, flags);
+		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = remap_area_pages(addr, pfn, size, flags);
+		err = remap_area_pages(addr, pfn, size, type);
 
 	if (err) {
 		vunmap((void *)addr);
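
The two deleted blocks above that set PMD_SECT_XN for ARMv6 are what the commit message is about: that per-callsite CPU fixup can now be done once against the table. A hedged sketch of such a table-wide fixup, reusing struct mem_type from the sketch earlier (the helper name, the constants' values, and the apply-to-every-entry policy are illustrative; the kernel's actual fixup logic lives elsewhere in mmu.c and is not part of this diff):

#define PMD_SECT_XN	0x10	/* illustrative value, not the kernel's */
#define CPU_ARCH_ARMv6	8	/* illustrative value, not the kernel's */

/* Apply a CPU-specific fixup once, table-wide, instead of at every
 * ioremap call site.  Illustrative only. */
static void fixup_mem_types(struct mem_type *types, unsigned int n,
                            int cpu_arch)
{
        unsigned int i;

        if (cpu_arch < CPU_ARCH_ARMv6)
                return;

        /* XN prevents speculative prefetches from hitting I/O. */
        for (i = 0; i < n; i++)
                types[i].prot_sect |= PMD_SECT_XN;
}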
arch/arm/mm/mm.h: 9 additions & 0 deletions
@@ -16,6 +16,15 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 	return pmd_off(pgd_offset_k(virt), virt);
 }
 
+struct mem_type {
+	unsigned int prot_pte;
+	unsigned int prot_l1;
+	unsigned int prot_sect;
+	unsigned int domain;
+};
+
+const struct mem_type *get_mem_type(unsigned int type);
+
 #endif
 
 struct map_desc;
arch/arm/mm/mmu.c: 6 additions & 8 deletions
@@ -176,14 +176,7 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-struct mem_type {
-	unsigned int prot_pte;
-	unsigned int prot_l1;
-	unsigned int prot_sect;
-	unsigned int domain;
-};
-
-static struct mem_type mem_types[] __initdata = {
+static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			    L_PTE_WRITE,
@@ -237,6 +230,11 @@ static struct mem_type mem_types[] __initdata = {
 	}
 };
 
+const struct mem_type *get_mem_type(unsigned int type)
+{
+	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
+}
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
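
Two details in this hunk are easy to miss: mem_types loses its __initdata annotation because ioremap() now reads the table at runtime, after init memory has been discarded; and get_mem_type() bounds-checks its index, returning NULL for unknown types so callers can fail gracefully. A hypothetical caller illustrating that NULL contract (the helper and its fallback policy are made up, not from the patch):

/* Hypothetical caller sketch; only the NULL check is the point. */
static const struct mem_type *io_type_or_default(unsigned int mt)
{
        const struct mem_type *type = get_mem_type(mt);

        /* Out-of-range index: fall back to the basic device type. */
        return type ? type : get_mem_type(MT_DEVICE);
}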
