
Merge branch 'cache' (early part)
Russell King committed Dec 17, 2009
2 parents c0caac9 + bf32eb8 commit 6665398
Showing 29 changed files with 224 additions and 155 deletions.
12 changes: 4 additions & 8 deletions arch/arm/common/dmabounce.c
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
memcpy(ptr, buf->safe, size);

/*
* DMA buffers must have the same cache properties
* as if they were really used for DMA - which means
* data must be written back to RAM. Note that
* we don't use dmac_flush_range() here for the
* bidirectional case because we know the cache
* lines will be coherent with the data written.
* Since we may have written to a page cache page,
* we need to ensure that the data will be coherent
* with user mappings.
*/
dmac_clean_range(ptr, ptr + size);
outer_clean_range(__pa(ptr), __pa(ptr) + size);
__cpuc_flush_kernel_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
}
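
The rationale in the new comment is that the bounce-buffer copy-back may land in the kernel mapping of a page-cache page, so the written data has to be flushed far enough to be visible through aliasing user mappings; a single clean+invalidate of the area replaces the earlier clean-only sequence. A minimal user-space sketch of that shape, with hypothetical names standing in for the kernel helpers (cache_flush_area() is not a real kernel API):

/*
 * Illustrative sketch only, not the kernel code: the shape of the
 * bounce-buffer copy-back path.  cache_flush_area() is a hypothetical
 * stand-in for the clean+invalidate done by __cpuc_flush_kernel_dcache_area().
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct bounce_buf {
	void   *ptr;   /* caller's buffer; may be a kernel mapping of a page-cache page */
	void   *safe;  /* DMA-safe bounce buffer the device actually wrote to */
	size_t  size;
};

static void cache_flush_area(void *addr, size_t size)
{
	/* stand-in: clean+invalidate the D-cache over [addr, addr + size) */
	printf("flush %zu bytes at %p\n", size, addr);
}

static void copy_back_from_bounce(struct bounce_buf *buf)
{
	/* the device DMA'd into the safe buffer; copy the data back */
	memcpy(buf->ptr, buf->safe, buf->size);

	/*
	 * The destination may also be mapped into user space, so the data
	 * just written through the kernel mapping is flushed so that aliasing
	 * user mappings observe it (the reasoning in the new comment above).
	 */
	cache_flush_area(buf->ptr, buf->size);
}

int main(void)
{
	char orig[64], safe[64] = "data from device";
	struct bounce_buf b = { orig, safe, sizeof(safe) };

	copy_back_from_bounce(&b);
	return 0;
}
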
17 changes: 5 additions & 12 deletions arch/arm/include/asm/cacheflush.h
@@ -211,7 +211,7 @@ struct cpu_cache_fns {

void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_page)(void *);
void (*flush_kern_dcache_area)(void *, size_t);

void (*dma_inv_range)(const void *, const void *);
void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area

/*
* These are private to the dma-mapping API. Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
* These are private to the dma-mapping API. Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
{
/* highmem pages are always flushed upon kunmap already */
if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
__cpuc_flush_dcache_page(page_address(page));
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}

#define flush_dcache_mmap_lock(mapping) \
@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
*/
#define flush_icache_page(vma,page) do { } while (0)

static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
unsigned offset, size_t size)
{
const void *start = (void __force *)virt + offset;
dmac_inv_range(start, start + size);
}

/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
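
The hunks above carry the API rename at the heart of this merge: the per-CPU cache vector gains flush_kern_dcache_area(void *, size_t) in place of flush_kern_dcache_page(void *), and both the function-pointer (multi-cache) spelling and the __glue spelling of __cpuc_flush_dcache_* change with it. A small user-space sketch of the function-pointer variant, using made-up names, shows why the new size argument has to be threaded through the table, each implementation and every caller together:

/*
 * User-space sketch (made-up names) of the indirection used when several
 * cache implementations are built into one kernel: every cache operation is
 * reached through a table of function pointers, so the prototype change from
 * (void *) to (void *, size_t) must hit the table, each implementation and
 * every caller at the same time.
 */
#include <stddef.h>
#include <stdio.h>

struct cache_fns {
	void (*flush_kern_dcache_area)(void *addr, size_t size);
};

/* one possible implementation, selected at boot for the CPU actually found */
static void demo_v6_flush_kern_dcache_area(void *addr, size_t size)
{
	printf("flush %zu bytes at %p\n", size, addr);
}

static struct cache_fns cpu_cache = {
	.flush_kern_dcache_area = demo_v6_flush_kern_dcache_area,
};

/* callers only ever see this macro, mirroring __cpuc_flush_dcache_area */
#define __cpuc_flush_dcache_area(addr, size) \
	cpu_cache.flush_kern_dcache_area(addr, size)

int main(void)
{
	char page[4096];

	/* the size is now supplied by the caller instead of being assumed */
	__cpuc_flush_dcache_area(page, sizeof(page));
	return 0;
}
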
11 changes: 6 additions & 5 deletions arch/arm/mm/cache-fa.S
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
mov pc, lr

/*
* flush_kern_dcache_page(kaddr)
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
* - addr - kernel address
* - size - size of region
*/
ENTRY(fa_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
ENTRY(fa_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
.long fa_flush_user_cache_range
.long fa_coherent_kern_range
.long fa_coherent_user_range
.long fa_flush_kern_dcache_page
.long fa_flush_kern_dcache_area
.long fa_dma_inv_range
.long fa_dma_clean_range
.long fa_dma_flush_range
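
Each assembler implementation now computes the end address from the caller-supplied size (add r1, r0, r1) instead of assuming a page (add r1, r0, #PAGE_SZ), then walks the region one D-cache line at a time. A C model of that loop, with a hypothetical dcache_clean_inv_line() standing in for the per-line MCR:

/*
 * C model of the loop in fa_flush_kern_dcache_area (illustrative only).
 * dcache_clean_inv_line() is a hypothetical stand-in for
 * "mcr p15, 0, rX, c7, c14, 1" (clean & invalidate one D line).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_DLINESIZE 32u   /* D-cache line size assumed here */

static void dcache_clean_inv_line(uintptr_t line)
{
	printf("clean+inv line at %#lx\n", (unsigned long)line);
}

static void flush_kern_dcache_area(void *addr, size_t size)
{
	uintptr_t p   = (uintptr_t)addr;
	uintptr_t end = p + size;     /* previously always addr + PAGE_SIZE */

	while (p < end) {
		dcache_clean_inv_line(p);
		p += CACHE_DLINESIZE;
	}
	/* the assembler routine follows this with core-specific write-buffer
	 * and I-cache maintenance */
}

int main(void)
{
	char buf[128];

	flush_kern_dcache_area(buf, sizeof(buf));
	return 0;
}
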
93 changes: 72 additions & 21 deletions arch/arm/mm/cache-l2x0.c
@@ -28,69 +28,120 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);

static inline void sync_writel(unsigned long val, unsigned long reg,
unsigned long complete_mask)
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
unsigned long flags;

spin_lock_irqsave(&l2x0_lock, flags);
writel(val, l2x0_base + reg);
/* wait for the operation to complete */
while (readl(l2x0_base + reg) & complete_mask)
while (readl(reg) & mask)
;
spin_unlock_irqrestore(&l2x0_lock, flags);
}

static inline void cache_sync(void)
{
sync_writel(0, L2X0_CACHE_SYNC, 1);
void __iomem *base = l2x0_base;
writel(0, base + L2X0_CACHE_SYNC);
cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_inv_all(void)
{
unsigned long flags;

/* invalidate all ways */
sync_writel(0xff, L2X0_INV_WAY, 0xff);
spin_lock_irqsave(&l2x0_lock, flags);
writel(0xff, l2x0_base + L2X0_INV_WAY);
cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
unsigned long addr;
void __iomem *base = l2x0_base;
unsigned long flags;

spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}

if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel(end, base + L2X0_CLEAN_INV_LINE_PA);
}

for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
sync_writel(addr, L2X0_INV_LINE_PA, 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);

while (start < blk_end) {
cache_wait(base + L2X0_INV_LINE_PA, 1);
writel(start, base + L2X0_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}

if (blk_end < end) {
spin_unlock_irqrestore(&l2x0_lock, flags);
spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
unsigned long addr;
void __iomem *base = l2x0_base;
unsigned long flags;

spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);

while (start < blk_end) {
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
writel(start, base + L2X0_CLEAN_LINE_PA);
start += CACHE_LINE_SIZE;
}

if (blk_end < end) {
spin_unlock_irqrestore(&l2x0_lock, flags);
spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
unsigned long addr;
void __iomem *base = l2x0_base;
unsigned long flags;

spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);

while (start < blk_end) {
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}

if (blk_end < end) {
spin_unlock_irqrestore(&l2x0_lock, flags);
spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}

void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
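
Beyond replacing sync_writel() (which took and released the spinlock around every single line operation) with cache_wait() plus writel() under one lock, the three range operations now drop and immediately re-acquire the lock after each 4096-byte block, so a long clean/invalidate of a large range no longer keeps interrupts disabled for its whole duration. A stand-alone sketch of that latency-bounding pattern follows; lock(), unlock() and op_line() are placeholders, not kernel APIs:

/*
 * Sketch of the latency-bounding pattern used in the l2x0 range operations:
 * walk the whole range under one lock, but release and re-take it after
 * every 4096-byte block so pending interrupts get a chance to run.
 */
#include <stdio.h>

#define CACHE_LINE_SIZE 32UL
#define BLOCK_SIZE      4096UL

static unsigned long lines_done;

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* placeholders for spin_lock_irqsave()/spin_unlock_irqrestore() and writel() */
static void lock(unsigned long *flags)   { (void)flags; }
static void unlock(unsigned long *flags) { (void)flags; }
static void op_line(unsigned long pa)    { (void)pa; lines_done++; }

static void range_op(unsigned long start, unsigned long end)
{
	unsigned long flags;

	lock(&flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min_ul(end - start, BLOCK_SIZE);

		/* issue one maintenance operation per cache line in this block */
		while (start < blk_end) {
			op_line(start);
			start += CACHE_LINE_SIZE;
		}

		/* between blocks, briefly drop the lock so interrupts can run */
		if (blk_end < end) {
			unlock(&flags);
			lock(&flags);
		}
	}
	unlock(&flags);
}

int main(void)
{
	range_op(0x80000000UL, 0x80000000UL + 3 * BLOCK_SIZE + 100);
	printf("touched %lu lines\n", lines_done);
	return 0;
}
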
9 changes: 5 additions & 4 deletions arch/arm/mm/cache-v3.S
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
mov pc, lr

/*
* flush_kern_dcache_page(void *page)
* flush_kern_dcache_area(void *page, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
* - addr - kernel address
* - size - region size
*/
ENTRY(v3_flush_kern_dcache_page)
ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */

/*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_coherent_user_range
.long v3_flush_kern_dcache_page
.long v3_flush_kern_dcache_area
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
9 changes: 5 additions & 4 deletions arch/arm/mm/cache-v4.S
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
mov pc, lr

/*
* flush_kern_dcache_page(void *page)
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
* - addr - kernel address
* - size - region size
*/
ENTRY(v4_flush_kern_dcache_page)
ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */

/*
@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_coherent_user_range
.long v4_flush_kern_dcache_page
.long v4_flush_kern_dcache_area
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
11 changes: 6 additions & 5 deletions arch/arm/mm/cache-v4wb.S
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
mov pc, lr

/*
* flush_kern_dcache_page(void *page)
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
* - addr - kernel address
* - size - region size
*/
ENTRY(v4wb_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
ENTRY(v4wb_flush_kern_dcache_area)
add r1, r0, r1
/* fall through */

/*
@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_coherent_user_range
.long v4wb_flush_kern_dcache_page
.long v4wb_flush_kern_dcache_area
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
11 changes: 6 additions & 5 deletions arch/arm/mm/cache-v4wt.S
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
mov pc, lr

/*
* flush_kern_dcache_page(void *page)
* flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
* - addr - kernel address
* - size - region size
*/
ENTRY(v4wt_flush_kern_dcache_page)
ENTRY(v4wt_flush_kern_dcache_area)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, #PAGE_SZ
add r1, r0, r1
/* fallthrough */

/*
@@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_coherent_user_range
.long v4wt_flush_kern_dcache_page
.long v4wt_flush_kern_dcache_area
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
11 changes: 6 additions & 5 deletions arch/arm/mm/cache-v6.S
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
* v6_flush_kern_dcache_page(kaddr)
* v6_flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
* - addr - kernel address
* - size - region size
*/
ENTRY(v6_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
ENTRY(v6_flush_kern_dcache_area)
add r1, r0, r1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
@@ -271,7 +272,7 @@ ENTRY(v6_cache_fns)
.long v6_flush_user_cache_range
.long v6_coherent_kern_range
.long v6_coherent_user_range
.long v6_flush_kern_dcache_page
.long v6_flush_kern_dcache_area
.long v6_dma_inv_range
.long v6_dma_clean_range
.long v6_dma_flush_range