Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 218722
b: refs/heads/master
c: 492e675
h: refs/heads/master
v: v3
  • Loading branch information
David Howells committed Oct 27, 2010
1 parent 6d55dcd commit 575dd0d
Show file tree
Hide file tree
Showing 7 changed files with 43 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 8f19e3daf3fffee9e18a8812067a6a4b538ae6c8
refs/heads/master: 492e675116003b99dfcf0fa70084027e86bc0161
4 changes: 2 additions & 2 deletions trunk/arch/mn10300/include/asm/highmem.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
BUG();
#endif
set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
__flush_tlb_one(vaddr);
local_flush_tlb_one(vaddr);

return vaddr;
}
Expand Down Expand Up @@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
* this PTE without first remapping it
*/
pte_clear(kmap_pte - idx);
__flush_tlb_one(vaddr);
local_flush_tlb_one(vaddr);
}
#endif
pagefault_enable();
Expand Down
2 changes: 1 addition & 1 deletion trunk/arch/mn10300/include/asm/mmu_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
/* we exhausted the TLB PIDs of this version on this CPU, so we
* flush this CPU's TLB in its entirety and start new cycle */
flush_tlb_all();
local_flush_tlb_all();

/* fix the TLB version if needed (we avoid version #0 so as to
* distinguish MMU_NO_CONTEXT) */
Expand Down
56 changes: 35 additions & 21 deletions trunk/arch/mn10300/include/asm/tlbflush.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,21 +13,37 @@

#include <asm/processor.h>

#define __flush_tlb() \
do { \
int w; \
__asm__ __volatile__ \
(" mov %1,%0 \n" \
" or %2,%0 \n" \
" mov %0,%1 \n" \
: "=d"(w) \
: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
: "cc", "memory" \
); \
} while (0)
/**
 * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
 *
 * Read-modify-write of the MMUCTR register: the value is read, ORed with
 * MMUCTR_IIV|MMUCTR_DIV (presumably the instruction- and data-TLB
 * invalidate bits, going by their names -- confirm against the mn10300
 * register definitions), and written back.  Only the executing CPU's TLBs
 * are affected; no cross-CPU shootdown is performed here.
 */
static inline void local_flush_tlb(void)
{
int w;
/* The "memory" clobber stops the compiler reordering PTE stores across
 * the flush; "cc" is listed because the condition flags are clobbered. */
asm volatile(
" mov %1,%0 \n"
" or %2,%0 \n"
" mov %0,%1 \n"
: "=d"(w)
: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
: "cc", "memory");
}

#define __flush_tlb_all() __flush_tlb()
#define __flush_tlb_one(addr) __flush_tlb()
/**
 * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
 */
#define local_flush_tlb_all() local_flush_tlb()

/**
 * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
 * @addr: The address of the entry to flush (ignored)
 *
 * NOTE: @addr is discarded -- the underlying local_flush_tlb() primitive
 * invalidates the entire TLB, so flushing a single entry degenerates to
 * flushing everything on this CPU.
 */
#define local_flush_tlb_one(addr) local_flush_tlb()

/**
* local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
* @mm: The MM to flush for
* @addr: The address of the target page in RAM (not its page struct)
*/
extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);


/*
Expand All @@ -43,14 +59,14 @@ do { \
#define flush_tlb_all() \
do { \
preempt_disable(); \
__flush_tlb_all(); \
local_flush_tlb_all(); \
preempt_enable(); \
} while (0)

#define flush_tlb_mm(mm) \
do { \
preempt_disable(); \
__flush_tlb_all(); \
local_flush_tlb_all(); \
preempt_enable(); \
} while (0)

Expand All @@ -59,22 +75,20 @@ do { \
unsigned long __s __attribute__((unused)) = (start); \
unsigned long __e __attribute__((unused)) = (end); \
preempt_disable(); \
__flush_tlb_all(); \
local_flush_tlb_all(); \
preempt_enable(); \
} while (0)

#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
#define flush_tlb() flush_tlb_all()

#define __flush_tlb_global() flush_tlb_all()
#define flush_tlb() flush_tlb_all()
/* Flush kernel-address-range mappings.  @start and @end are evaluated
 * into unused locals (preserving any side effects of the argument
 * expressions and type-checking them) but otherwise ignored, since the
 * whole TLB is flushed regardless of range. */
#define flush_tlb_kernel_range(start, end) \
do { \
unsigned long __s __attribute__((unused)) = (start); \
unsigned long __e __attribute__((unused)) = (end); \
flush_tlb_all(); \
} while (0)

extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);

#define flush_tlb_pgtables(mm, start, end) do {} while (0)

#endif /* _ASM_TLBFLUSH_H */
2 changes: 1 addition & 1 deletion trunk/arch/mn10300/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ void __init paging_init(void)
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);

__flush_tlb_all();
local_flush_tlb_all();
}

/*
Expand Down
4 changes: 2 additions & 2 deletions trunk/arch/mn10300/mm/mmu-context.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ unsigned long mmu_context_cache[NR_CPUS] = {
/*
* flush the specified TLB entry
*/
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
{
unsigned long pteu, cnx, flags;

Expand All @@ -33,7 +33,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
* interference from vmalloc'd regions */
local_irq_save(flags);

cnx = mm_context(vma->vm_mm);
cnx = mm_context(mm);

if (cnx != MMU_NO_CONTEXT) {
pteu = addr | (cnx & 0x000000ffUL);
Expand Down
2 changes: 1 addition & 1 deletion trunk/arch/mn10300/mm/pgtable.c
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
__flush_tlb_one(vaddr);
local_flush_tlb_one(vaddr);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
Expand Down

0 comments on commit 575dd0d

Please sign in to comment.