
Commit

---
r: 233492
b: refs/heads/master
c: 06824ba
h: refs/heads/master
v: v3
Russell King committed Feb 21, 2011
1 parent ff23232 commit fbead20
Showing 2 changed files with 90 additions and 14 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: a9ad21fed09cb95d34af9474be0831525b30c4c6
refs/heads/master: 06824ba824b3e9f2fedb38bee79af0643198ed7f
102 changes: 89 additions & 13 deletions trunk/arch/arm/include/asm/tlb.h
@@ -18,7 +18,6 @@
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

@@ -27,7 +26,23 @@

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * We need to delay page freeing for SMP as other CPUs can access pages
 * which have been removed but not yet had their TLB entries invalidated.
 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
 * we need to apply this same delaying tactic to ensure correct operation.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb)	0
#define FREE_PTE_NR		500
#else
#define tlb_fast_mode(tlb)	1
#define FREE_PTE_NR		0
#endif

/*
 * TLB handling. This allows us to remove pages from the page
@@ -36,28 +51,75 @@
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	struct page		*pages[FREE_PTE_NR];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex. There's three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas. See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL. Additionally, page tables will be freed.
 *  3. Unmapping argument pages. See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->vma = NULL;
	tlb->nr = 0;

	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
@@ -71,12 +133,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
	tlb_add_flush(tlb, addr);
}

/*
@@ -89,6 +146,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
@@ -97,12 +155,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->range_end > 0)
		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
	} else {
		tlb->pages[tlb->nr++] = page;
		if (tlb->nr >= FREE_PTE_NR)
			tlb_flush_mmu(tlb);
	}
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, pte);
}

#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm) do { } while (0)
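
The new comment above the tlb_fast_mode()/FREE_PTE_NR definitions explains the core idea of the patch: on SMP, and on ARMv7 with speculative prefetch, other observers can still hold TLB entries for a page that has just been unmapped, so pages must not be returned to the allocator until after the TLB flush. Below is a minimal, self-contained userspace sketch of that batching behaviour, not part of the patch: the trimmed struct and the stub functions (flush_tlb, free_page_now) are hypothetical stand-ins, but the control flow mirrors the tlb_remove_page()/tlb_flush_mmu() pair introduced above.

#include <stdio.h>

#define FREE_PTE_NR	500

struct page { int id; };

struct mmu_gather_model {
	unsigned int nr;
	struct page *pages[FREE_PTE_NR];
};

/* stubs standing in for the real TLB flush and page allocator */
static void flush_tlb(void) { printf("TLB flushed\n"); }
static void free_page_now(struct page *p) { printf("page %d freed\n", p->id); }

static int tlb_fast_mode(void) { return 0; }	/* model the SMP/ARMv7 case */

static void tlb_flush_mmu(struct mmu_gather_model *tlb)
{
	unsigned int i;

	flush_tlb();				/* invalidate stale entries first... */
	for (i = 0; i < tlb->nr; i++)		/* ...then it is safe to free the pages */
		free_page_now(tlb->pages[i]);
	tlb->nr = 0;
}

static void tlb_remove_page(struct mmu_gather_model *tlb, struct page *page)
{
	if (tlb_fast_mode()) {
		free_page_now(page);		/* UP, no speculation: free immediately */
		return;
	}
	tlb->pages[tlb->nr++] = page;		/* defer until after the next flush */
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb);		/* batch is full: flush, then drain */
}

int main(void)
{
	struct mmu_gather_model tlb = { .nr = 0 };
	struct page p1 = { 1 }, p2 = { 2 };

	tlb_remove_page(&tlb, &p1);
	tlb_remove_page(&tlb, &p2);
	tlb_flush_mmu(&tlb);			/* tlb_finish_mmu() would do this */
	return 0;
}

Built with a plain C compiler, the model prints the flush before any frees, which is the ordering the patch establishes.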
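The block comment above tlb_flush() lists the three ways the shootdown code is entered: unmapping a vma range (zap_page_range(), unmap_region()), tearing down a whole mm (exit_mmap()), and shifting argument pages (shift_arg_pages()), where tlb_start_vma()/tlb_end_vma() are never called and tlb->vma stays NULL. The sketch below is a hedged userspace model of just that flush decision, not kernel code; the trimmed struct and the stub flush functions are hypothetical, but the branch logic follows tlb_flush() as added by this patch.

#include <stdio.h>

#define TASK_SIZE	0xc0000000UL
#define PAGE_SIZE	0x1000UL

struct vm_area_struct { unsigned long vm_start, vm_end; };

struct mmu_gather_model {
	unsigned int fullmm;
	struct vm_area_struct *vma;
	unsigned long range_start, range_end;
};

/* stubs standing in for the real flush primitives */
static void flush_tlb_mm(void) { printf("flush entire mm\n"); }
static void flush_tlb_range(unsigned long s, unsigned long e)
{
	printf("flush range %#lx..%#lx\n", s, e);
}

static void tlb_flush(struct mmu_gather_model *tlb)
{
	if (tlb->fullmm || !tlb->vma) {
		/* cases 2 and 3: no per-vma range available, flush the whole mm */
		flush_tlb_mm();
	} else if (tlb->range_end > 0) {
		/* case 1: flush only the range accumulated by tlb_add_flush() */
		flush_tlb_range(tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

int main(void)
{
	struct vm_area_struct vma = { 0x8000, 0x10000 };

	/* 1. zap_page_range()/unmap_region() style: vma set, range accumulated */
	struct mmu_gather_model range_unmap = { 0, &vma, 0x8000, 0x8000 + PAGE_SIZE };
	/* 2. exit_mmap() style: fullmm = 1 */
	struct mmu_gather_model full_unmap = { 1, &vma, TASK_SIZE, 0 };
	/* 3. shift_arg_pages() style: fullmm = 0 but no vma was ever set */
	struct mmu_gather_model arg_unmap = { 0, NULL, TASK_SIZE, 0 };

	tlb_flush(&range_unmap);
	tlb_flush(&full_unmap);
	tlb_flush(&arg_unmap);
	return 0;
}

Running it shows case 1 flushing only the accumulated range, while cases 2 and 3 fall back to flushing the whole mm.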
