mariux64/linux
arch/arm64/kernel/module.c
Latest commit 3fd00be · May 28, 2019 · Ard Biesheuvel and Will Deacon:
arm64/module: revert to unsigned interpretation of ABS16/32 relocations
494 lines (445 loc) · 13.4 KB

/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */
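	/*
	 * Worked example of the two bounds: for R_AARCH64_ABS16, an
	 * (S + A) value of 0xffff passes the unsigned check below
	 * (0 <= 0xffff <= U16_MAX), whereas the same value for
	 * R_AARCH64_PREL16 is rejected (0xffff > S16_MAX == 0x7fff).
	 */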
	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
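	/*
	 * At this point sval is 0 if the original value was non-negative
	 * and in range, -1 if it was negative and in range, and anything
	 * else on overflow. Adding 1 maps the two in-range cases to 1 and
	 * 0, so a single unsigned comparison against 2 catches both signs.
	 */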
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place,
						  val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);

	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
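			/*
			 * The MOVW groups each cover one 16-bit slice of the
			 * value: G0 patches bits [15:0], G1 [31:16],
			 * G2 [47:32] and G3 [63:48], matching the lsb
			 * arguments 0/16/32/48 passed to reloc_insn_movw().
			 * The _NC ("no check") cases above clear
			 * overflow_check and fall through to their checked
			 * siblings.
			 */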
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc,
							    &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
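/*
 * Note on R_AARCH64_JUMP26/CALL26 above: B and BL encode a signed 26-bit
 * word offset, i.e. a reach of +/-128 MiB from the branch. With
 * CONFIG_ARM64_MODULE_PLTS enabled, an out-of-range branch is routed
 * through a PLT entry from module_emit_plt_entry() and the relocation is
 * retried against that trampoline.
 */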
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}
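
As a reading aid, the range check used by reloc_insn_imm() can be exercised outside the kernel. Below is a minimal userspace sketch of the same mask-and-shift idiom; the helper name fits_in_bits() and the test values are invented for illustration and do not appear in module.c. Like the kernel code, it relies on the usual arithmetic right shift of negative values.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring reloc_insn_imm()'s overflow test:
 * returns 1 if sval fits in a signed immediate field of 'len' bits.
 */
static int fits_in_bits(int64_t sval, int len)
{
	uint64_t imm_mask = (1ULL << len) - 1;

	/*
	 * Keep the sign bit of the field and everything above it, then
	 * arithmetic-shift so an in-range value collapses to 0 or -1.
	 */
	sval = (int64_t)(sval & ~(imm_mask >> 1)) >> (len - 1);
	return (uint64_t)(sval + 1) < 2;
}

int main(void)
{
	/* A 19-bit field, as used by R_AARCH64_LD_PREL_LO19. */
	printf("%d\n", fits_in_bits(0x3ffff, 19));	/* 1: 2^18 - 1 fits */
	printf("%d\n", fits_in_bits(-0x40000, 19));	/* 1: -2^18 fits */
	printf("%d\n", fits_in_bits(0x40000, 19));	/* 0: 2^18 overflows */
	return 0;
}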