Skip to content
Navigation Menu
Toggle navigation
Sign in
In this repository
All GitHub Enterprise
↵
Jump to
↵
No suggested jump to results
In this repository
All GitHub Enterprise
↵
Jump to
↵
In this organization
All GitHub Enterprise
↵
Jump to
↵
In this repository
All GitHub Enterprise
↵
Jump to
↵
Sign in
Reseting focus
You signed in with another tab or window.
Reload
to refresh your session.
You signed out in another tab or window.
Reload
to refresh your session.
You switched accounts on another tab or window.
Reload
to refresh your session.
Dismiss alert
{{ message }}
mariux64
/
linux
Public
Notifications
You must be signed in to change notification settings
Fork
0
Star
0
Code
Issues
2
Pull requests
0
Actions
Projects
0
Wiki
Security
Insights
Additional navigation options
Code
Issues
Pull requests
Actions
Projects
Wiki
Security
Insights
Files
dcba710
Documentation
arch
block
certs
crypto
drivers
firmware
fs
include
init
ipc
kernel
bpf
cgroup
configs
debug
events
gcov
irq
livepatch
Kconfig
Makefile
core.c
core.h
patch.c
patch.h
transition.c
transition.h
locking
power
printk
rcu
sched
time
trace
.gitignore
Kconfig.freezer
Kconfig.hz
Kconfig.locks
Kconfig.preempt
Makefile
acct.c
async.c
audit.c
audit.h
audit_fsnotify.c
audit_tree.c
audit_watch.c
auditfilter.c
auditsc.c
backtracetest.c
bounds.c
capability.c
compat.c
configs.c
context_tracking.c
cpu.c
cpu_pm.c
crash_core.c
crash_dump.c
cred.c
delayacct.c
dma.c
elfcore.c
exec_domain.c
exit.c
extable.c
fork.c
freezer.c
futex.c
futex_compat.c
groups.c
hung_task.c
irq_work.c
jump_label.c
kallsyms.c
kcmp.c
kcov.c
kexec.c
kexec_core.c
kexec_file.c
kexec_internal.h
kmod.c
kprobes.c
ksysfs.c
kthread.c
latencytop.c
membarrier.c
memremap.c
module-internal.h
module.c
module_signing.c
notifier.c
nsproxy.c
padata.c
panic.c
params.c
pid.c
pid_namespace.c
profile.c
ptrace.c
range.c
reboot.c
relay.c
resource.c
seccomp.c
signal.c
smp.c
smpboot.c
smpboot.h
softirq.c
stacktrace.c
stop_machine.c
sys.c
sys_ni.c
sysctl.c
sysctl_binary.c
task_work.c
taskstats.c
test_kprobes.c
torture.c
tracepoint.c
tsacct.c
ucount.c
uid16.c
up.c
user-return-notifier.c
user.c
user_namespace.c
utsname.c
utsname_sysctl.c
watchdog.c
watchdog_hld.c
workqueue.c
workqueue_internal.h
lib
mm
net
samples
scripts
security
sound
tools
usr
virt
.cocciconfig
.get_maintainer.ignore
.gitattributes
.gitignore
.mailmap
COPYING
CREDITS
Kbuild
Kconfig
MAINTAINERS
Makefile
README
Breadcrumbs
linux
/
kernel
/
livepatch
/
patch.c
Blame
Blame
Latest commit
History
History
276 lines (223 loc) · 6.43 KB
Breadcrumbs
linux
/
kernel
/
livepatch
/
patch.c
Top
File metadata and controls
Code
Blame
276 lines (223 loc) · 6.43 KB
Raw
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "patch.h"
#include "transition.h"

/* List of klp_ops structs, one per patched old function address. */
static LIST_HEAD(klp_ops);

/*
 * klp_find_ops() - find the klp_ops whose func_stack hooks @old_addr.
 *
 * Only the first entry of each ops' func_stack is compared; presumably
 * every func stacked on one ops shares the same old_addr, so checking
 * the top is sufficient.
 *
 * Returns the matching ops, or NULL if @old_addr is not patched.
 */
struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

/*
 * klp_ftrace_handler() - ftrace callback installed on every patched
 * function; redirects execution to the newest applicable replacement
 * by rewriting the saved program counter in @regs.
 *
 * Marked notrace: it must never itself be traced, as it runs inside
 * the ftrace machinery.
 */
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			/* Walked past the bottom of the stack: leave the
			 * original function unredirected. */
			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/* Divert execution to the chosen replacement function. */
	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

/*
 * klp_unpatch_func() - remove @func from its ops' func_stack.
 *
 * If @func is the only entry on the stack, the ftrace handler is
 * unregistered, the filter cleared, and the ops freed; otherwise only
 * @func is unlinked so the next func on the stack takes over.
 */
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	/* Sanity: must currently be patched and have a resolved address. */
	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		/* Last patch for this address: tear down the ftrace hook. */
		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

/*
 * klp_patch_func() - hook @func into ftrace so calls to the original
 * function get redirected to func->new_func.
 *
 * Creates and registers a new klp_ops when this is the first patch for
 * func->old_addr; otherwise simply pushes @func onto the existing
 * func_stack (the handler always uses the newest entry).
 *
 * Returns 0 on success or a negative errno.
 */
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		/*
		 * SAVE_REGS and IPMODIFY are required so the handler can
		 * rewrite the saved PC; DYNAMIC because ops is kfree'd.
		 */
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			/* Roll back the filter set above before bailing. */
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		/* Address already hooked: stack this func on top. */
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	/* Undo the list insertions; the ops was freshly allocated here. */
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

/*
 * klp_unpatch_object() - unpatch every currently patched function in @obj
 * and mark the object unpatched.
 */
void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

/*
 * klp_patch_object() - patch every function in @obj.
 *
 * On the first failure, any functions already patched are rolled back
 * via klp_unpatch_object().
 *
 * Returns 0 on success, -EINVAL if already patched, or the error from
 * klp_patch_func().
 */
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			/* Partial failure: undo everything patched so far. */
			klp_unpatch_object(obj);
			return ret;
		}
	}

	obj->patched = true;

	return 0;
}

/*
 * klp_unpatch_objects() - unpatch every currently patched object in @patch.
 */
void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
You can’t perform that action at this time.