/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
#define pull            lsl
#define push            lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)    code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)  code
#else
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
        .macro  disable_irq_notrace
        cpsid   i
        .endm

        .macro  enable_irq_notrace
        cpsie   i
        .endm
#else
        .macro  disable_irq_notrace
        msr     cpsr_c, #PSR_I_BIT | SVC_MODE
        .endm

        .macro  enable_irq_notrace
        msr     cpsr_c, #SVC_MODE
        .endm
#endif

        .macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
        stmdb   sp!, {r0-r3, ip, lr}
        bl      trace_hardirqs_off
        ldmia   sp!, {r0-r3, ip, lr}
#endif
        .endm

        .macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
        /*
         * actually the registers should be pushed and pop'd conditionally, but
         * after bl the flags are certainly clobbered
         */
        stmdb   sp!, {r0-r3, ip, lr}
        bl\cond trace_hardirqs_on
        ldmia   sp!, {r0-r3, ip, lr}
#endif
        .endm

        .macro asm_trace_hardirqs_on
        asm_trace_hardirqs_on_cond al
        .endm

        .macro disable_irq
        disable_irq_notrace
        asm_trace_hardirqs_off
        .endm

        .macro enable_irq
        asm_trace_hardirqs_on
        enable_irq_notrace
        .endm

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
        .macro  save_and_disable_irqs, oldcpsr
        mrs     \oldcpsr, cpsr
        disable_irq
        .endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
        .macro  restore_irqs_notrace, oldcpsr
        msr     cpsr_c, \oldcpsr
        .endm

        .macro restore_irqs, oldcpsr
        tst     \oldcpsr, #PSR_I_BIT
        asm_trace_hardirqs_on_cond eq
        restore_irqs_notrace \oldcpsr
        .endm

#define USER(x...)                              \
9999:   x;                                      \
        .pushsection __ex_table,"a";            \
        .align  3;                              \
        .long   9999b,9001f;                    \
        .popsection
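/*
 * Illustrative use of USER() (a sketch, not part of the original header):
 * the caller wraps a single user-space access and provides the 9001:
 * fixup label that the exception table entry above points at, e.g.
 *
 * USER(        strbt   r2, [r0], #1)           @ may fault on the user pointer in r0
 *              ...
 * 9001:        mov     r0, #-EFAULT            @ hypothetical fixup: report the fault
 *              mov     pc, lr
 */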
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)                                       \
9998:   instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)                                        \
        .pushsection ".alt.smp.init", "a"                       ;\
        .long   9998b                                           ;\
9997:   instr                                                   ;\
        .if . - 9997b != 4                                      ;\
                .error "ALT_UP() content must assemble to exactly 4 bytes";\
        .endif                                                  ;\
        .popsection
#define ALT_UP_B(label)                                         \
        .equ    up_b_offset, label - 9998b                      ;\
        .pushsection ".alt.smp.init", "a"                       ;\
        .long   9998b                                           ;\
        W(b)    . + up_b_offset                                 ;\
        .popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * SMP data memory barrier
 */
        .macro  smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
        .ifeqs "\mode","arm"
        ALT_SMP(dmb)
        .else
        ALT_SMP(W(dmb))
        .endif
#elif __LINUX_ARM_ARCH__ == 6
        ALT_SMP(mcr     p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
        .ifeqs "\mode","arm"
        ALT_UP(nop)
        .else
        ALT_UP(W(nop))
        .endif
#endif
        .endm

#ifdef CONFIG_THUMB2_KERNEL
        .macro  setmode, mode, reg
        mov     \reg, #\mode
        msr     cpsr_c, \reg
        .endm
#else
        .macro  setmode, mode, reg
        msr     cpsr_c, #\mode
        .endm
#endif

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

        .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
9999:
        .if     \inc == 1
        \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
        .elseif \inc == 4
        \instr\cond\()\t\().w \reg, [\ptr, #\off]
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endm

        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort
        @ explicit IT instruction needed because of the label
        @ introduced by the USER macro
        .ifnc   \cond,al
        .if     \rept == 1
        itt     \cond
        .elseif \rept == 2
        ittt    \cond
        .else
        .error  "Unsupported rept macro argument"
        .endif
        .endif

        @ Slightly optimised to avoid incrementing the pointer twice
        usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
        .if     \rept == 2
        usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
        .endif

        add\cond \ptr, #\rept * \inc
        .endm

#else   /* !CONFIG_THUMB2_KERNEL */

        .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
        .rept   \rept
9999:
        .if     \inc == 1
        \instr\cond\()b\()\t \reg, [\ptr], #\inc
        .elseif \inc == 4
        \instr\cond\()\t \reg, [\ptr], #\inc
        .else
        .error  "Unsupported inc macro argument"
        .endif

        .pushsection __ex_table,"a"
        .align  3
        .long   9999b, \abort
        .popsection
        .endr
        .endm

#endif  /* CONFIG_THUMB2_KERNEL */

        .macro  strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  str, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm

        .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
        usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
        .endm

#endif /* __ASM_ASSEMBLER_H__ */
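/*
 * Illustrative use of the IRQ and user-access helpers above (a sketch,
 * not part of the original header), as they might appear in assembler
 * source that includes this file:
 *
 *      save_and_disable_irqs r9        @ r9 := CPSR, then mask IRQs
 *      ...                             @ code that must not be interrupted
 *      restore_irqs r9                 @ restore the saved IRQ state
 *
 *      ldrusr  r3, r1, 4               @ load a word from the user pointer in r1,
 *                                      @ then r1 += 4; a fault branches to the
 *                                      @ default 9001f abort label, which the
 *                                      @ surrounding code must provide
 */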