mariux64/linux: arch/arm/include/asm/kvm_mmu.h (commit 04b8dc8)
274 lines (214 loc) · 6.97 KB
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
#define KERN_TO_HYP(kva)	(kva)

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}

/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#define kvm_pud_addr_end(addr,end)		(end)

#define kvm_pmd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#define kvm_pgd_index(addr)			pgd_index(addr)

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)

#define KVM_PREALLOC_LEVEL	0

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	return kvm->arch.pgd;
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache. If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */
	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

	VM_BUG_ON(size & ~PAGE_MASK);

	if (!need_flush && !icache_is_pipt())
		goto vipt_cache;

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		if (need_flush)
			kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

vipt_cache:
	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

#endif	/* !__ASSEMBLY__ */

#endif	/* __ARM_KVM_MMU_H__ */
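
The "open coded" kvm_pgd_addr_end()/kvm_pmd_addr_end() macros above exist so that walks over a guest's stage-2 tables can clamp each step to the next table boundary while still handling 64-bit intermediate physical addresses on a 32-bit kernel. Below is a minimal user-space sketch of that clamping logic, not kernel code: the demo_* names and the 1 GiB DEMO_PGDIR_SIZE are invented for illustration and are not the kernel's configuration-dependent constants.

/* Stand-alone demo of the boundary-clamping idea behind kvm_pgd_addr_end().
 * The sizes are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PGDIR_SIZE	(1ULL << 30)			/* 1 GiB per first-level entry */
#define DEMO_PGDIR_MASK	(~(DEMO_PGDIR_SIZE - 1))

/* Advance to the next first-level boundary, but never past 'end'.
 * Comparing "boundary - 1 < end - 1" keeps the result correct even if the
 * boundary computation wraps around to 0 at the top of the address space. */
static uint64_t demo_pgd_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + DEMO_PGDIR_SIZE) & DEMO_PGDIR_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* A range that straddles a 1 GiB boundary. */
	uint64_t addr = 0x3ff00000ULL;
	uint64_t end  = 0x40100000ULL;

	while (addr < end) {
		uint64_t next = demo_pgd_addr_end(addr, end);

		printf("chunk: [0x%llx, 0x%llx)\n",
		       (unsigned long long)addr, (unsigned long long)next);
		addr = next;
	}
	return 0;
}

Running this prints two chunks split at 0x40000000, which is the effect the real walkers rely on: no single step ever crosses a first-level entry, so each chunk can be handled with one table lookup.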