linux/arch/arm/include/asm/tlb.h
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There's three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_page(tlb, pte);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif
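The header above only defines the ARM side of the mmu_gather interface; the call sequence is driven by the core mm code. The following is a minimal sketch of that sequence for case 1 from the comment in the header (partial unmap of a vma range). The function name unmap_example is hypothetical and the per-PTE work is left as a comment; real callers such as zap_page_range() reach these hooks via unmap_vmas()/unmap_page_range() in mm/memory.c.

#include <linux/mm.h>
#include <asm/tlb.h>

/*
 * Illustrative sketch only, not part of the original header: shows the
 * order in which the gather API defined above is typically exercised
 * when tearing down part of a user address space (tlb->fullmm == 0).
 */
static void unmap_example(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	/* start a gather covering [start, end) of vma->vm_mm */
	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);

	/* flushes the cache range and records tlb->vma */
	tlb_start_vma(&tlb, vma);

	/*
	 * For each PTE the caller tears down, it records the address so
	 * tlb_flush() can use flush_tlb_range(), and queues the page so
	 * it is only freed after the TLB has been flushed:
	 *
	 *	tlb_remove_tlb_entry(&tlb, ptep, addr);
	 *	tlb_remove_page(&tlb, page);
	 */

	/* flush the accumulated range for this vma */
	tlb_end_vma(&tlb, vma);

	/* final flush, free gathered pages, release the pages[] array */
	tlb_finish_mmu(&tlb, start, end);
}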