Merge branch 'highmem' into devel
Russell King authored and committed on Mar 24, 2009
2 parents 9a38e98 + 053a96c commit fbf2b1f
Showing 22 changed files with 557 additions and 79 deletions.
9 changes: 8 additions & 1 deletion Documentation/arm/memory.txt
@@ -29,7 +29,14 @@ ffff0000 ffff0fff CPU vector page.
CPU supports vector relocation (control
register V bit.)

ffc00000 fffeffff DMA memory mapping region. Memory returned
fffe0000 fffeffff XScale cache flush area. This is used
in proc-xscale.S to flush the whole data
cache. Free for other usage on non-XScale.

fff00000 fffdffff Fixmap mapping region. Addresses provided
by fix_to_virt() will be located here.

ffc00000 ffefffff DMA memory mapping region. Memory returned
by the dma_alloc_xxx functions will be
dynamically mapped here.

17 changes: 17 additions & 0 deletions arch/arm/Kconfig
@@ -939,6 +939,23 @@ config NODES_SHIFT
default "2"
depends on NEED_MULTIPLE_NODES

config HIGHMEM
bool "High Memory Support (EXPERIMENTAL)"
depends on MMU && EXPERIMENTAL
help
The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
space as well as some memory mapped IO. That means that, if you
have a large amount of physical memory and/or IO, not all of the
memory can be "permanently mapped" by the kernel. The physical
memory that is not permanently mapped is called "high memory".

Depending on the selected kernel/user memory split, minimum
vmalloc space and actual amount of RAM, you may not need this
option; leaving it disabled should then result in a slightly faster kernel.

If unsure, say n.

source "mm/Kconfig"

config LEDS
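Editorial aside, not part of the commit: with the stated dependencies (MMU, EXPERIMENTAL) satisfied, enabling the feature comes down to a two-line .config fragment:

    CONFIG_EXPERIMENTAL=y
    CONFIG_HIGHMEM=y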
7 changes: 7 additions & 0 deletions arch/arm/common/dmabounce.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
@@ -349,6 +350,12 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,

BUG_ON(!valid_dma_direction(dir));

if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
"is not supported\n");
return ~0;
}

return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);
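Editorial sketch, not part of the commit: the ~0 returned above is the sentinel that this era's ARM dma_mapping_error() tests for, so a caller can detect the highmem refusal in the usual way:

    dma_addr_t handle;

    handle = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle))
            return -ENOMEM;    /* e.g. a highmem page on a dmabounce device */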
14 changes: 13 additions & 1 deletion arch/arm/include/asm/dma-mapping.h
@@ -15,10 +15,20 @@
* must not be used by drivers.
*/
#ifndef __arch_page_to_dma

#if !defined(CONFIG_HIGHMEM)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
#else
#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
#endif

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
@@ -57,6 +67,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, int rw);

/*
* Return whether the given device DMA address mask can be supported
@@ -316,7 +328,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
BUG_ON(!valid_dma_direction(dir));

if (!arch_is_coherent())
dma_cache_maint(page_address(page) + offset, size, dir);
dma_cache_maint_page(page, offset, size, dir);

return page_to_dma(dev, page) + offset;
}
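Editorial sketch, not part of the commit: on machine classes that inherit the default __pfn_to_bus() (bus address equal to physical address), the highmem variant of page_to_dma() is pure pfn arithmetic and never calls page_address(), which can be NULL for an unmapped highmem page:

    /* page_to_dma(dev, page) == page_to_pfn(page) << PAGE_SHIFT */
    dma_addr_t bus = page_to_dma(dev, page) + offset;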
41 changes: 41 additions & 0 deletions arch/arm/include/asm/fixmap.h
@@ -0,0 +1,41 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

/*
* Nothing too fancy for now.
*
* On ARM we already have well known fixed virtual addresses imposed by
* the architecture such as the vector page which is located at 0xffff0000,
* therefore a second level page table is already allocated covering
* 0xfff00000 upwards.
*
* The cache flushing code in proc-xscale.S uses the virtual area between
* 0xfffe0000 and 0xfffeffff.
*/

#define FIXADDR_START 0xfff00000UL
#define FIXADDR_TOP 0xfffe0000UL
#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START)

#define FIX_KMAP_BEGIN 0
#define FIX_KMAP_END (FIXADDR_SIZE >> PAGE_SHIFT)

#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)

extern void __this_fixmap_does_not_exist(void);

static inline unsigned long fix_to_virt(const unsigned int idx)
{
if (idx >= FIX_KMAP_END)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}

static inline unsigned int virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}

#endif
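Editorial sketch, not part of the commit, assuming 4 KiB pages (PAGE_SHIFT == 12): the fixmap is a fixed linear translation between slot indices and virtual addresses:

    fix_to_virt(0) == 0xfff00000             /* FIXADDR_START */
    fix_to_virt(1) == 0xfff01000
    virt_to_fix(0xfff01000UL) == 1

For a constant in-range index the compiler folds the range check away; an out-of-range constant leaves the call to the undefined __this_fixmap_does_not_exist() in place, so the build fails at link time rather than at run time.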
31 changes: 31 additions & 0 deletions arch/arm/include/asm/highmem.h
@@ -0,0 +1,31 @@
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/kmap_types.h>

#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define kmap_prot PAGE_KERNEL

#define flush_cache_kmaps() flush_cache_all()

extern pte_t *pkmap_page_table;

#define ARCH_NEEDS_KMAP_HIGH_GET

extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);

extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern struct page *kmap_atomic_to_page(const void *ptr);

#endif
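Editorial usage sketch, not part of the commit, written against this 2009-era API (kmap_atomic() still takes a km_type slot here; later kernels dropped that argument). KM_USER0 is one of the generic slots from asm/kmap_types.h:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy out of a possibly-highmem page without sleeping. */
    static void copy_from_page(struct page *page, void *dst, size_t len)
    {
            char *src = kmap_atomic(page, KM_USER0); /* per-CPU window, preemption off */

            memcpy(dst, src, len);
            kunmap_atomic(src, KM_USER0);            /* must pair with the same slot */
    }

kmap(), by contrast, may sleep and is therefore only usable from process context.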
1 change: 1 addition & 0 deletions arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_L2_CACHE,
KM_TYPE_NR
};

14 changes: 11 additions & 3 deletions arch/arm/include/asm/memory.h
@@ -44,13 +44,20 @@
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - 16*1048576)

#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
* The highmem pkmap virtual space shares the end of the module area.
*/
#ifdef CONFIG_HIGHMEM
#define MODULES_END (PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END (PAGE_OFFSET)
#endif

/*
* The XIP kernel gets mapped at the bottom of the module vm area.
* Since we use sections to map it, this macro replaces the physical address
@@ -181,6 +188,7 @@ static inline void *phys_to_virt(unsigned long x)
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
#define __pfn_to_bus(x) ((x) << PAGE_SHIFT)
#endif

static inline __deprecated unsigned long virt_to_bus(void *x)
5 changes: 4 additions & 1 deletion arch/arm/mach-iop13xx/include/mach/memory.h
@@ -59,7 +59,10 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
})

#define __arch_page_to_dma(dev, page) \
__arch_virt_to_dma(dev, page_address(page))
({ \
/* __is_lbus_virt() can never be true for RAM pages */ \
(dma_addr_t)page_to_phys(page); \
})

#endif /* CONFIG_ARCH_IOP13XX */
#endif /* !ASSEMBLY */
6 changes: 5 additions & 1 deletion arch/arm/mach-ks8695/include/mach/memory.h
@@ -35,7 +35,11 @@ extern struct bus_type platform_bus_type;
__phys_to_virt(x) : __bus_to_virt(x)); })
#define __arch_virt_to_dma(dev, x) ({ is_lbus_device(dev) ? \
(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
#define __arch_page_to_dma(dev, x) __arch_virt_to_dma(dev, page_address(x))
#define __arch_page_to_dma(dev, x) \
({ dma_addr_t __dma = page_to_phys(x); \
if (!is_lbus_device(dev)) \
__dma = __dma - PHYS_OFFSET + KS8695_PCIMEM_PA; \
__dma; })
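/*
 * Editorial worked example, not part of this commit: for a PCI
 * master (is_lbus_device() false), a RAM page at physical address
 * PHYS_OFFSET + off is presented at bus address KS8695_PCIMEM_PA + off;
 * a local-bus device sees the raw physical address unchanged.
 */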

#endif

1 change: 1 addition & 0 deletions arch/arm/mm/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o

obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
obj-$(CONFIG_HIGHMEM) += highmem.o

obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o
54 changes: 37 additions & 17 deletions arch/arm/mm/cache-feroceon-l2.c
@@ -14,8 +14,12 @@

#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <plat/cache-feroceon-l2.h>

#include "mm.h"

/*
* Low-level cache maintenance operations.
@@ -34,14 +38,36 @@
* The range operations require two successive cp15 writes, in
* between which we don't want to be preempted.
*/

static inline unsigned long l2_start_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
/*
* Let's do our own fixmap stuff in a minimal way here.
* Because range ops can't be done on physical addresses,
* we simply install a virtual mapping for it only for the
* TLB lookup to occur, hence no need to flush the untouched
* memory mapping. This is protected with the disabling of
* interrupts by the caller.
*/
unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
local_flush_tlb_kernel_page(vaddr);
return vaddr + (paddr & ~PAGE_MASK);
#else
return __phys_to_virt(paddr);
#endif
}
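/*
 * Editorial sketch, not part of this commit: each CPU owns its own
 * KM_L2_CACHE fixmap slot, so on CPU n the transient window sits at
 *     __fix_to_virt(FIX_KMAP_BEGIN + KM_L2_CACHE + KM_TYPE_NR * n)
 * and the intra-page offset (paddr & ~PAGE_MASK) is preserved, which
 * is why the range ops below can use va_end = va_start + (end - start).
 */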

static inline void l2_clean_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
}

static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
unsigned long flags;
unsigned long va_start, va_end, flags;

/*
* Make sure 'start' and 'end' reference the same page, as
@@ -51,17 +77,14 @@ static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
BUG_ON((start ^ end) >> PAGE_SHIFT);

raw_local_irq_save(flags);
va_start = l2_start_va(start);
va_end = va_start + (end - start);
__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
"mcr p15, 1, %1, c15, c9, 5"
: : "r" (start), "r" (end));
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
}

static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
}

static inline void l2_clean_inv_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
@@ -72,9 +95,9 @@ static inline void l2_inv_pa(unsigned long addr)
__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
}

static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
unsigned long flags;
unsigned long va_start, va_end, flags;

/*
* Make sure 'start' and 'end' reference the same page, as
@@ -84,17 +107,14 @@ static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
BUG_ON((start ^ end) >> PAGE_SHIFT);

raw_local_irq_save(flags);
va_start = l2_start_va(start);
va_end = va_start + (end - start);
__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
"mcr p15, 1, %1, c15, c11, 5"
: : "r" (start), "r" (end));
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
}

static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
}


/*
* Linux primitives.
[Diff truncated: the remaining changed files in this commit are not shown.]
