diff --git a/[refs] b/[refs]
index d9429d24abf5..42341e9f2fbd 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9df5f74194871ebd0e51ef5ad2eca5084acaaaba
+refs/heads/master: ef7cc35b0ee03431731186320b18e5da585341ff
diff --git a/trunk/arch/parisc/include/asm/cacheflush.h b/trunk/arch/parisc/include/asm/cacheflush.h
index 7a73b615c23d..477277739da5 100644
--- a/trunk/arch/parisc/include/asm/cacheflush.h
+++ b/trunk/arch/parisc/include/asm/cacheflush.h
@@ -38,6 +38,18 @@ void flush_cache_mm(struct mm_struct *mm);
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 
+/* vmap range flushes and invalidates. Architecturally, we don't need
+ * the invalidate, because the CPU should refuse to speculate once an
+ * area has been flushed, so invalidate is left empty */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	flush_kernel_dcache_range_asm(start, start + size);
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
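
A minimal usage sketch (not part of the patch) of how a caller doing device I/O to a vmalloc()/vmap()'d buffer might pair the two new helpers. The function name example_io_to_vmapped_buffer and the elided transfer step are hypothetical; only the flush_kernel_vmap_range()/invalidate_kernel_vmap_range() calls come from the API added above.

#include <linux/highmem.h>	/* typically pulls in <asm/cacheflush.h>, where parisc defines the helpers */

/* Hypothetical caller: hand a vmap'd buffer to a device and read back
 * its reply.  vaddr/size describe the vmap alias used by the CPU. */
static void example_io_to_vmapped_buffer(void *vaddr, int size)
{
	/* Push dirty cache lines covering the vmap alias out to memory
	 * so the device sees the CPU's latest writes. */
	flush_kernel_vmap_range(vaddr, size);

	/* ... start the transfer and wait for completion (elided) ... */

	/* Discard any stale cache lines over the alias before the CPU
	 * reads what the device wrote.  Per the comment in the patch,
	 * parisc leaves this empty because the CPU will not speculate
	 * back into an already-flushed range. */
	invalidate_kernel_vmap_range(vaddr, size);
}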