linux/tools/testing/selftests/bpf/progs/verifier_sock.c
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/sock.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} map_reuseport_array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockhash SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_xskmap SEC(".maps");

struct val {
	int cnt;
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(max_entries, 0);
	__type(key, int);
	__type(value, struct val);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");

SEC("cgroup/skb")
__description("skb->sk: no NULL check")
__failure __msg("invalid mem access 'sock_common_or_null'")
__failure_unpriv
__naked void skb_sk_no_null_check(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	r0 = *(u32*)(r1 + 0);			\
	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_family]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("skb->sk: sk->type [fullsock field]")
__failure __msg("invalid sock_common access")
__failure_unpriv
__naked void sk_sk_type_fullsock_field_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	r0 = *(u32*)(r1 + %[bpf_sock_type]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	call %[bpf_sk_fullsock];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_type]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	exit;					\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_family]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_state]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u16*)(r0 + %[__imm_0]);		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r2 = *(u8*)(r0 + %[bpf_sock_dst_port]);	\
	r2 = *(u8*)(r0 + %[__imm_0]);		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_type]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_protocol]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	call %[bpf_tcp_sock];			\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_tcp_sock];			\
	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l1_%=;			\
	exit;					\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l1_%=;			\
	exit;					\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l1_%=;			\
	exit;					\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	exit;					\
l1_%=:	r1 = r0;				\
	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l2_%=;			\
	exit;					\
l2_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(skb->sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_skb_sk(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 == 0 goto l0_%=;			\
	call %[bpf_sk_release];			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_fullsock_skb_sk(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	exit;					\
l1_%=:	r1 = r0;				\
	call %[bpf_sk_release];			\
	r0 = 1;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_tcp_sock_skb_sk(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l1_%=;			\
	exit;					\
l1_%=:	r1 = r0;				\
	call %[bpf_sk_release];			\
	r0 = 1;					\
	exit;					\
"	:
	: __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
__success __retval(0)
__naked void sk_null_0_value_null(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r4 = 0;					\
	r3 = 0;					\
	r2 = r0;				\
	r1 = %[sk_storage_map] ll;		\
	call %[bpf_sk_storage_get];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r4 = 1;					\
	r3 = 1;					\
	r2 = r0;				\
	r1 = %[sk_storage_map] ll;		\
	call %[bpf_sk_storage_get];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
	asm volatile ("				\
	r2 = 0;					\
	*(u64*)(r10 - 8) = r2;			\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r4 = 1;					\
	r3 = r10;				\
	r3 += -8;				\
	r2 = r0;				\
	r1 = %[sk_storage_map] ll;		\
	call %[bpf_sk_storage_get];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_storage_get),
	  __imm_addr(sk_storage_map),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u32*)(r10 - 4) = r1;			\
	r2 = r10;				\
	r2 += -4;				\
	r1 = %[sk_storage_map] ll;		\
	call %[bpf_map_lookup_elem];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(sk_storage_map)
	: __clobber_all);
}

SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u32*)(r10 - 8) = r1;			\
	r2 = r10;				\
	r2 += -8;				\
	r1 = %[map_xskmap] ll;			\
	call %[bpf_map_lookup_elem];		\
	if r0 != 0 goto l0_%=;			\
	exit;					\
l0_%=:	r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]);	\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_xskmap),
	  __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u32*)(r10 - 4) = r1;			\
	r2 = r10;				\
	r2 += -4;				\
	r1 = %[map_sockmap] ll;			\
	call %[bpf_map_lookup_elem];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u32*)(r10 - 4) = r1;			\
	r2 = r10;				\
	r2 += -4;				\
	r1 = %[map_sockhash] ll;		\
	call %[bpf_map_lookup_elem];		\
	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_sockhash)
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u32*)(r10 - 4) = r1;			\
	r2 = r10;				\
	r2 += -4;				\
	r1 = %[map_sockmap] ll;			\
	call %[bpf_map_lookup_elem];		\
	if r0 != 0 goto l0_%=;			\
	exit;					\
l0_%=:	r1 = r0;				\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);	\
	call %[bpf_sk_release];			\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockmap),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
	asm volatile ("				\
	r1 = 0;					\
	*(u32*)(r10 - 4) = r1;			\
	r2 = r10;				\
	r2 += -4;				\
	r1 = %[map_sockhash] ll;		\
	call %[bpf_map_lookup_elem];		\
	if r0 != 0 goto l0_%=;			\
	exit;					\
l0_%=:	r1 = r0;				\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);	\
	call %[bpf_sk_release];			\
	exit;					\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_sk_release),
	  __imm_addr(map_sockhash),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
	asm volatile ("				\
	r4 = 0;					\
	r2 = 0;					\
	*(u32*)(r10 - 4) = r2;			\
	r3 = r10;				\
	r3 += -4;				\
	r2 = %[map_reuseport_array] ll;		\
	call %[bpf_sk_select_reuseport];	\
	exit;					\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_reuseport_array)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
	asm volatile ("				\
	r4 = 0;					\
	r2 = 0;					\
	*(u32*)(r10 - 4) = r2;			\
	r3 = r10;				\
	r3 += -4;				\
	r2 = %[map_sockmap] ll;			\
	call %[bpf_sk_select_reuseport];	\
	exit;					\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
__success
__naked void reuseport_ctx_sockhash_key_flags(void)
{
	asm volatile ("				\
	r4 = 0;					\
	r2 = 0;					\
	*(u32*)(r10 - 4) = r2;			\
	r3 = r10;				\
	r3 += -4;				\
	r2 = %[map_sockmap] ll;			\
	call %[bpf_sk_select_reuseport];	\
	exit;					\
"	:
	: __imm(bpf_sk_select_reuseport),
	  __imm_addr(map_sockmap)
	: __clobber_all);
}

SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
	asm volatile ("				\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);	\
	if r1 != 0 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	r6 = r1;				\
	call %[bpf_skc_to_tcp_sock];		\
	r7 = r0;				\
	r1 = r6;				\
	call %[bpf_skc_to_tcp_request_sock];	\
	r8 = r0;				\
	if r8 != 0 goto l1_%=;			\
	r0 = 0;					\
	exit;					\
l1_%=:	r0 = *(u8*)(r7 + 0);			\
	exit;					\
"	:
	: __imm(bpf_skc_to_tcp_request_sock),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}

SEC("cgroup/post_bind4")
__description("sk->src_ip6[0] [load 1st byte]")
__failure __msg("invalid bpf_context access off=28 size=2")
__naked void post_bind4_read_src_ip6(void)
{
	asm volatile ("				\
	r6 = r1;				\
	r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]);	\
	r0 = 1;					\
	exit;					\
"	:
	: __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0]))
	: __clobber_all);
}

SEC("cgroup/post_bind4")
__description("sk->mark [load mark]")
__failure __msg("invalid bpf_context access off=16 size=2")
__naked void post_bind4_read_mark(void)
{
	asm volatile ("				\
	r6 = r1;				\
	r7 = *(u16*)(r6 + %[bpf_sock_mark]);	\
	r0 = 1;					\
	exit;					\
"	:
	: __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark))
	: __clobber_all);
}

SEC("cgroup/post_bind6")
__description("sk->src_ip4 [load src_ip4]")
__failure __msg("invalid bpf_context access off=24 size=2")
__naked void post_bind6_read_src_ip4(void)
{
	asm volatile ("				\
	r6 = r1;				\
	r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]);	\
	r0 = 1;					\
	exit;					\
"	:
	: __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4))
	: __clobber_all);
}

SEC("cgroup/sock_create")
__description("sk->src_port [word load]")
__failure __msg("invalid bpf_context access off=44 size=2")
__naked void sock_create_read_src_port(void)
{
	asm volatile ("				\
	r6 = r1;				\
	r7 = *(u16*)(r6 + %[bpf_sock_src_port]);	\
	r0 = 1;					\
	exit;					\
"	:
	: __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";