---
r: 63874
b: refs/heads/master
c: 9535239
h: refs/heads/master
v: v3
Greg Ungerer authored and Linus Torvalds committed Aug 11, 2007
1 parent 323d0ce commit 68d9d4f
Showing 2 changed files with 39 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 73c59afc65cfa50c3362b9ce1ec151a79c41dd8e
+refs/heads/master: 9535239f6bc99f68e0cfae44505ad402b53ed24c
73 changes: 38 additions & 35 deletions trunk/include/asm-generic/pgtable.h
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_PGTABLE_H
 
 #ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -132,41 +133,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr) (pte)
 #endif
 
-/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads through
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entries and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode() do {} while (0)
-#define arch_leave_lazy_cpu_mode() do {} while (0)
-#define arch_flush_lazy_cpu_mode() do {} while (0)
-#endif
-
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
@@ -233,6 +199,43 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 	}
 	return 0;
 }
+#endif /* CONFIG_MMU */
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entries and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() do {} while (0)
+#define arch_leave_lazy_cpu_mode() do {} while (0)
+#define arch_flush_lazy_cpu_mode() do {} while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
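For context on the hooks this commit relocates: a caller is expected to bracket a run of PTE updates with the lazy MMU enter/leave hooks while holding the page table lock, as the comment above requires. Here is a minimal sketch of that usage pattern, assuming a hypothetical clear_young_range() helper; pte_offset_map_lock(), set_pte_at() and pte_mkold() are standard primitives of this kernel era, but the loop body is illustrative only, not taken from this commit.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hedged sketch: batching PTE updates under lazy MMU mode. */
static void clear_young_range(struct mm_struct *mm, pmd_t *pmd,
			      unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	arch_enter_lazy_mmu_mode();	/* PTE writes below may now be queued */
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (pte_present(*pte))
			set_pte_at(mm, addr, pte, pte_mkold(*pte));
	arch_leave_lazy_mmu_mode();	/* any queued updates are flushed here */

	pte_unmap_unlock(pte - 1, ptl);
}

Note that the loop never reads back through a PTE pointer it has already written; that is exactly the read hazard the comment warns about for direct mode hypervisors.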

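The lazy CPU mode hooks serve the context switch path: the scheduler of this era enters lazy CPU mode in context_switch() and the paravirt backend leaves it from within switch_to(), so the page table reload and the register state switch can be batched into a single hypercall. A condensed, hedged sketch of that shape, simplified from kernel/sched.c with accounting and kernel-thread handling omitted:

#include <linux/sched.h>
#include <asm/mmu_context.h>
#include <asm/system.h>

/* Hedged sketch: lazy CPU mode wrapped around a context switch. */
static inline void context_switch_sketch(struct task_struct *prev,
					 struct task_struct *next)
{
	struct mm_struct *mm = next->mm;
	struct mm_struct *oldmm = prev->active_mm;

	/*
	 * Per the convention in the comment above: entry is paired with
	 * an exit (performed in the switch_to() backend), never nested,
	 * and only one lazy mode is active at a time.
	 */
	arch_enter_lazy_cpu_mode();

	if (mm)
		switch_mm(oldmm, mm, next);	/* reload queued while lazy */

	switch_to(prev, next, prev);		/* backend leaves lazy mode */
}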