/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

	/*
	 * This needs to be assigned at runtime when the linker symbols are
	 * resolved. These are unsigned 64bit really, but in this assembly code
	 * We store them as 32bit.
	 */
	.pushsection .data
	.align	2
	.globl	kernel_sec_start
	.globl	kernel_sec_end
kernel_sec_start:
	.long	0
	.long	0
kernel_sec_end:
	.long	0
	.long	0
	.popsection

	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB( it	lo )				@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr_l	r8, _text			@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1 - machine type
	 *  r2 - boot data (atags/dt) pointer
	 *  r4 - translation table base (low word)
	 *  r5 - translation table base (high word, if LPAE)
	 *  r8 - translation table base 1 (pfn if LPAE)
	 *  r9 - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg

/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * The main matter: map in the kernel using section mappings, and
	 * set two variables to indicate the physical start and end of the
	 * kernel.
	 */
	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
#else
	str	r8, [r5]			@ Save physical start of kernel (LE)
#endif
	orr	r3, r8, r7			@ Add the MMU flags
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b
	eor	r3, r3, r7			@ Remove the MMU flags
	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r3, [r5, #4]			@ Save physical end of kernel (BE)
#else
	str	r3, [r5]			@ Save physical end of kernel (LE)
#endif

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	cmp	r2, #0
	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
	addne	r3, r3, r4
	orrne	r6, r7, r0, lsl #SECTION_SHIFT
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg

#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)				@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr_l	r3, secondary_data
	mov_l	r12, __secondary_switched
	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
	adr_l	r7, secondary_data + 12		@ get secondary_data.stack
	ldr	sp, [r7]
	ldr	r0, [r7, #4]			@ get secondary_data.task
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

#endif /* defined(CONFIG_SMP) */

/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0)
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	instr_sync
	mov	r3, r3
	mov	r3, r13
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection

#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So its an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	adr_l	r4, __smpalt_begin
	adr_l	r5, __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.pushsection .data
	.align	2
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif

	.text
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr
	ldmia	r4, {r0, r6}
 ARM(	str	r6, [r0, r4]	)
 THUMB(	add	r0, r0, r4	)
	add	r4, r4, #8
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r0.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#include "head-common.S"