Commit d6c6a75

---
r: 133342
b: refs/heads/master
c: c203518
h: refs/heads/master
v: v3
---

Paul Mundt committed Mar 17, 2009
1 parent e62e295 commit d6c6a75
Showing 2 changed files with 93 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3a3b311ca375a37b29bb78b030f96bf97dee97f5
+refs/heads/master: c20351846efcb755ba849d9fb701fbd9a1ffb7c2
100 changes: 92 additions & 8 deletions trunk/arch/sh/include/asm/tlb.h

@@ -6,22 +6,106 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#include <linux/pagemap.h>
 
 #ifdef CONFIG_MMU
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
+/*
+ * TLB handling. This allows us to remove pages from the page
+ * tables, and efficiently handle the TLB issues.
+ */
+struct mmu_gather {
+        struct mm_struct *mm;
+        unsigned int fullmm;
+        unsigned long start, end;
+};
+
-#define tlb_start_vma(tlb, vma) \
-        flush_cache_range(vma, vma->vm_start, vma->vm_end)
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-#define tlb_end_vma(tlb, vma) \
-        flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+        tlb->start = TASK_SIZE;
+        tlb->end = 0;
 
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+        if (tlb->fullmm) {
+                tlb->start = 0;
+                tlb->end = TASK_SIZE;
+        }
+}
 
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+        tlb->mm = mm;
+        tlb->fullmm = full_mm_flush;
+
+        init_tlb_gather(tlb);
+
+        return tlb;
+}
+
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+        if (tlb->fullmm)
+                flush_tlb_mm(tlb->mm);
+
+        /* keep the page table cache within bounds */
+        check_pgt_cache();
+
+        put_cpu_var(mmu_gathers);
+}
+
+static inline void
+tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
+{
+        if (tlb->start > address)
+                tlb->start = address;
+        if (tlb->end < address + PAGE_SIZE)
+                tlb->end = address + PAGE_SIZE;
+}
+
 /*
- * Flush whole TLBs for MM
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
  */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (!tlb->fullmm)
+                flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (!tlb->fullmm && tlb->end) {
+                flush_tlb_range(vma, tlb->start, tlb->end);
+                init_tlb_gather(tlb);
+        }
+}
+
+#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
+#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp)
+
+#define tlb_migrate_finish(mm) do { } while (0)
 
 #else /* CONFIG_MMU */
 
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
 #define tlb_flush(tlb) do { } while (0)
 
-#include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
 
 #endif /* CONFIG_MMU */
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SH_TLB_H */
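The core of the change is the range bookkeeping in init_tlb_gather() and tlb_remove_tlb_entry(): the gather starts out empty (start = TASK_SIZE, end = 0), each cleared PTE widens the interval, and tlb_end_vma() then flushes only that span instead of the whole VMA. Below is a minimal userspace sketch of the same arithmetic; the PAGE_SIZE/TASK_SIZE values and the main() driver are illustrative stand-ins, not part of the commit.

#include <stdio.h>

/* Stand-ins for the kernel constants; the sizes are illustrative only. */
#define PAGE_SIZE 4096UL
#define TASK_SIZE (1UL << 32)

struct mmu_gather {
        unsigned int fullmm;
        unsigned long start, end;
};

/* Mirrors init_tlb_gather(): the empty range is encoded as start > end. */
static void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

/* Mirrors tlb_remove_tlb_entry(): grow [start, end) to cover the page. */
static void tlb_remove_tlb_entry(struct mmu_gather *tlb, unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

int main(void)
{
        struct mmu_gather tlb = { .fullmm = 0 };

        init_tlb_gather(&tlb);

        /* Unmap three pages; only the span they cover needs a TLB flush. */
        tlb_remove_tlb_entry(&tlb, 0x400000);
        tlb_remove_tlb_entry(&tlb, 0x402000);
        tlb_remove_tlb_entry(&tlb, 0x401000);

        printf("flush range: [%#lx, %#lx)\n", tlb.start, tlb.end);
        /* -> flush range: [0x400000, 0x403000) */
        return 0;
}

Note how an untouched gather leaves tlb->end at 0, which is why tlb_end_vma() checks tlb->end before issuing flush_tlb_range() at all.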
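For orientation, the generic mm unmap path of this era drives the mmu_gather hooks in a fixed order: one gather per unmap operation, one tlb_start_vma()/tlb_end_vma() pair per VMA, and one tlb_remove_tlb_entry() per PTE actually cleared. The trace program below sketches that protocol under those assumptions; the bodies are print stubs, not the kernel implementations.

#include <stdio.h>

/* Print stubs standing in for the real hooks defined in asm/tlb.h. */
static void tlb_gather_mmu(void)       { puts("tlb_gather_mmu: grab per-cpu mmu_gather, record fullmm"); }
static void tlb_start_vma(void)        { puts("tlb_start_vma: flush_cache_range() unless fullmm"); }
static void tlb_remove_tlb_entry(void) { puts("tlb_remove_tlb_entry: widen [start, end) to cover the PTE"); }
static void tlb_end_vma(void)          { puts("tlb_end_vma: flush_tlb_range(start, end), then re-init gather"); }
static void tlb_finish_mmu(void)       { puts("tlb_finish_mmu: flush_tlb_mm() if fullmm, release gather"); }

int main(void)
{
        tlb_gather_mmu();                 /* once per unmap operation */

        tlb_start_vma();                  /* once per VMA being torn down */
        for (int i = 0; i < 3; i++)       /* once per PTE actually cleared */
                tlb_remove_tlb_entry();
        tlb_end_vma();                    /* flushes only the gathered range */

        tlb_finish_mmu();                 /* full-MM teardown flushes the whole MM */
        return 0;
}

This ordering is what lets the fullmm case skip the per-VMA work entirely: tlb_finish_mmu() does a single flush_tlb_mm() at the end instead.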
