---
r: 37735
b: refs/heads/master
c: 6606c3e
h: refs/heads/master
i:
  37733: ee03fd4
  37731: 6773b76
  37727: 95a289f
v: v3
Zachary Amsden authored and Linus Torvalds committed Oct 1, 2006
1 parent 9ec130b commit 665a004
Showing 5 changed files with 33 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 9888a1cae3f859db38b9604e3df1c02177161bb0
refs/heads/master: 6606c3e0da5360799e07ae24b05080cc85c68e72
20 changes: 20 additions & 0 deletions trunk/include/asm-generic/pgtable.h
@@ -170,6 +170,26 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif

/*
* A facility to provide lazy MMU batching. This allows PTE updates and
* page invalidations to be delayed until a call to leave lazy MMU mode
* is issued. Some architectures may benefit from doing this, and it is
* beneficial for both shadow and direct mode hypervisors, which may batch
* the PTE updates which happen during this window. Note that using this
* interface requires that read hazards be removed from the code. A read
* hazard could result in the direct mode hypervisor case, since the actual
write to the page tables may not yet have taken place, so reads through
* a raw PTE pointer after it has been modified are not guaranteed to be
* up to date. This mode can only be entered and left under the protection of
* the page table locks for all page tables which may be modified. In the UP
* case, this is required so that preemption is disabled, and in the SMP case,
* it must synchronize the delayed page table writes properly on other CPUs.
*/
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)
#endif

/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
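For readers unfamiliar with the hook pair introduced above, the following stand-alone C toy models what a batching implementation behind arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode() could look like: PTE writes issued inside the lazy window are queued and only applied when the window is left, which is also why the comment warns about read hazards. The queue, the array-backed "page table", and all toy_* names are illustrative assumptions, not kernel code and not part of this patch.

/*
 * Toy user-space model of lazy MMU batching (illustrative only, not kernel
 * code): writes made between enter and leave are queued and applied in one
 * batch on leave, the way a direct-mode hypervisor could batch hypercalls.
 */
#include <stdio.h>
#include <stddef.h>

#define NPTES 8

static unsigned long page_table[NPTES];	/* stand-in for real PTEs */

static struct { size_t idx; unsigned long val; } queue[NPTES];
static size_t queued;
static int lazy;			/* inside the lazy window? */

static void toy_enter_lazy_mmu_mode(void)
{
	lazy = 1;
}

static void toy_leave_lazy_mmu_mode(void)
{
	size_t i;

	/* Flush: apply every deferred update in one batch. */
	for (i = 0; i < queued; i++)
		page_table[queue[i].idx] = queue[i].val;
	queued = 0;
	lazy = 0;
}

static void toy_set_pte(size_t idx, unsigned long val)
{
	if (lazy) {			/* defer while batching */
		queue[queued].idx = idx;
		queue[queued].val = val;
		queued++;
	} else {			/* immediate outside the window */
		page_table[idx] = val;
	}
}

int main(void)
{
	size_t i;

	toy_enter_lazy_mmu_mode();
	for (i = 0; i < NPTES; i++)
		toy_set_pte(i, 0x1000UL * (i + 1));

	/* Read hazard: the "page table" is still stale inside the window. */
	printf("inside window, pte[0] = %#lx\n", page_table[0]);

	toy_leave_lazy_mmu_mode();
	printf("after leave,   pte[0] = %#lx\n", page_table[0]);
	return 0;
}

The call sites below follow the ordering this model implies: each one enters the lazy window only after taking the page table lock and leaves it before unlocking, so the deferred writes cannot escape the critical section.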
8 changes: 8 additions & 0 deletions trunk/mm/memory.c
@@ -506,6 +506,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
src_pte = pte_offset_map_nested(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
arch_enter_lazy_mmu_mode();

do {
/*
@@ -527,6 +528,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

arch_leave_lazy_mmu_mode();
spin_unlock(src_ptl);
pte_unmap_nested(src_pte - 1);
add_mm_rss(dst_mm, rss[0], rss[1]);
@@ -628,6 +630,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
int anon_rss = 0;

pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
if (pte_none(ptent)) {
@@ -694,6 +697,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

add_mm_rss(mm, file_rss, anon_rss);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);

return addr;
@@ -1109,6 +1113,7 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
struct page *page = ZERO_PAGE(addr);
pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
@@ -1118,6 +1123,7 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, zero_pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
return 0;
}
@@ -1275,11 +1281,13 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
return 0;
}
2 changes: 2 additions & 0 deletions trunk/mm/mprotect.c
@@ -34,6 +34,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
spinlock_t *ptl;

pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
do {
oldpte = *pte;
if (pte_present(oldpte)) {
@@ -70,6 +71,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
}

} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
}

2 changes: 2 additions & 0 deletions trunk/mm/mremap.c
@@ -98,6 +98,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
new_ptl = pte_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
arch_enter_lazy_mmu_mode();

for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
new_pte++, new_addr += PAGE_SIZE) {
@@ -109,6 +110,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
set_pte_at(mm, new_addr, new_pte, pte);
}

arch_leave_lazy_mmu_mode();
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap_nested(new_pte - 1);
