---
yaml
---
r: 212386
b: refs/heads/master
c: c017780
h: refs/heads/master
v: v3
Catalin Marinas authored and Russell King committed Sep 19, 2010
1 parent afd11a8 commit 2aa39da
Showing 9 changed files with 18 additions and 11 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0fc73099dd25df2c5181b7bad57d1faa5cd12d3c
+refs/heads/master: c01778001a4f5ad9c62d882776235f3f31922fdd
6 changes: 3 additions & 3 deletions trunk/arch/arm/include/asm/cacheflush.h
@@ -137,10 +137,10 @@
 #endif
 
 /*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
  */
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 /*
  * MM Cache Management
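The rename inverts the sense of every test: code that used to act when test_and_clear_bit(PG_dcache_dirty, ...) returned true now acts when test_and_set_bit(PG_dcache_clean, ...) returns false, so a freshly allocated page (flag clear) is treated as dirty by default. A minimal userspace sketch of the new idiom, using a toy, non-atomic stand-in for the kernel's bitop:

#include <stdbool.h>
#include <stdio.h>

#define PG_dcache_clean 0UL     /* bit index; PG_arch_1 in the kernel */

/* Toy, non-atomic stand-in for the kernel's test_and_set_bit(). */
static bool test_and_set_bit(unsigned long bit, unsigned long *flags)
{
        bool old = *flags & (1UL << bit);

        *flags |= 1UL << bit;
        return old;
}

int main(void)
{
        unsigned long flags = 0;        /* new page: PG_dcache_clean unset */

        /* First toucher sees the flag clear: flush, then mark clean. */
        if (!test_and_set_bit(PG_dcache_clean, &flags))
                puts("D-cache flushed; page now marked clean");

        /* Later touchers find the flag already set and skip the flush. */
        if (!test_and_set_bit(PG_dcache_clean, &flags))
                puts("not reached: page already clean");

        return 0;
}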
2 changes: 1 addition & 1 deletion trunk/arch/arm/include/asm/tlbflush.h
@@ -560,7 +560,7 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #endif
 
 /*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
2 changes: 1 addition & 1 deletion trunk/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto = kmap_atomic(to, KM_USER1);
 
-       if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping(from), from);
 
        spin_lock(&minicache_lock);
2 changes: 1 addition & 1 deletion trunk/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
        unsigned int offset = CACHE_COLOUR(vaddr);
        unsigned long kfrom, kto;
 
-       if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping(from), from);
 
        /* FIXME: not highmem safe */
2 changes: 1 addition & 1 deletion trunk/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
        void *kto = kmap_atomic(to, KM_USER1);
 
-       if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &from->flags))
                __flush_dcache_page(page_mapping(from), from);
 
        spin_lock(&minicache_lock);
6 changes: 6 additions & 0 deletions trunk/arch/arm/mm/dma-mapping.c
@@ -523,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
        outer_inv_range(paddr, paddr + size);
 
        dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+       /*
+        * Mark the D-cache clean for this page to avoid extra flushing.
+        */
+       if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+               set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
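The guard above is deliberately conservative: only a transfer that invalidated the CPU's view of the whole page, and in which the device may have written to memory (anything but DMA_TO_DEVICE), lets the page be marked clean. A hypothetical restatement of that predicate, with toy definitions standing in for the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed page size for this sketch */

enum dma_data_direction {       /* trimmed copy of the kernel's enum */
        DMA_BIDIRECTIONAL,
        DMA_TO_DEVICE,
        DMA_FROM_DEVICE,
};

/*
 * Hypothetical helper mirroring the guard in ___dma_page_dev_to_cpu():
 * the D-cache may be marked clean only if the unmap covered the entire
 * page (off == 0, size >= PAGE_SIZE) and data may have flowed from the
 * device to the CPU.
 */
static bool can_mark_dcache_clean(enum dma_data_direction dir,
                                  unsigned long off, size_t size)
{
        return dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE;
}

int main(void)
{
        /* Full-page, device-to-CPU unmap: eligible (prints 1). */
        printf("%d\n", can_mark_dcache_clean(DMA_FROM_DEVICE, 0, PAGE_SIZE));
        /* Partial-page transfer: not eligible (prints 0). */
        printf("%d\n", can_mark_dcache_clean(DMA_FROM_DEVICE, 0, 512));
        return 0;
}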
4 changes: 2 additions & 2 deletions trunk/arch/arm/mm/fault-armv.c
@@ -141,7 +141,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE. Basically, there are two
  * things that we need to take care of:
  *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -169,7 +169,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
        mapping = page_mapping(page);
 #ifndef CONFIG_SMP
-       if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
 #endif
        if (mapping) {
3 changes: 2 additions & 1 deletion trunk/arch/arm/mm/flush.c
@@ -248,7 +248,7 @@ void flush_dcache_page(struct page *page)
 
 #ifndef CONFIG_SMP
        if (mapping && !mapping_mapped(mapping))
-               set_bit(PG_dcache_dirty, &page->flags);
+               clear_bit(PG_dcache_clean, &page->flags);
        else
 #endif
        {
@@ -257,6 +257,7 @@ void flush_dcache_page(struct page *page)
                        __flush_dcache_aliases(mapping, page);
                else if (mapping)
                        __flush_icache_all();
+               set_bit(PG_dcache_clean, &page->flags);
        }
 }
 EXPORT_SYMBOL(flush_dcache_page);
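Together with the update_mmu_cache() hunk above, this gives flush_dcache_page() a lazy protocol on non-SMP builds (the #ifndef CONFIG_SMP compiles the deferral out on SMP): a kernel-dirtied page with no user mappings merely has PG_dcache_clean cleared, and the flush is deferred until the page is first mapped into user space. A toy model of that decision, an illustration only and not the kernel's exact code:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the lazy-flush decision; the real code is in flush.c. */
struct page_model {
        bool dcache_clean;      /* stands in for PG_dcache_clean */
        bool user_mapped;       /* stands in for mapping_mapped(mapping) */
};

static void flush_dcache_page_model(struct page_model *page)
{
        if (!page->user_mapped) {
                page->dcache_clean = false;     /* defer: just mark dirty */
                puts("deferred: PG_dcache_clean cleared");
        } else {
                puts("flushing user aliases now");
                page->dcache_clean = true;      /* nothing left to write back */
        }
}

int main(void)
{
        struct page_model p = { .dcache_clean = true, .user_mapped = false };

        flush_dcache_page_model(&p);    /* kernel dirtied an unmapped page */
        p.user_mapped = true;
        flush_dcache_page_model(&p);    /* mapped page: flush immediately */

        return 0;
}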
