Merge branch 'bpf-copy_verifier_state-should-copy-loop_entry-field'
Eduard Zingerman says:

====================
This patch set fixes a bug in copy_verifier_state() where the
loop_entry field was not copied. This omission left stale loop_entry
values in env->cur_state, causing update_loop_entry() to make wrong
decisions about loop entry assignments.

An example of an unsafe program accepted by the verifier due to this
bug can be found in patch #2. This bug can also cause an infinite loop
in the verifier; see patch #5.
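
A minimal sketch of what the patch #1 fix amounts to (an illustrative
excerpt, not the verbatim upstream hunk):

    /* copy_verifier_state() duplicates the current state when a new
     * path is explored.  Before the fix it copied bookkeeping fields
     * such as dfs_depth but never loop_entry, so the destination kept
     * a stale pointer.  Illustrative excerpt only.
     */
    dst_state->dfs_depth = src->dfs_depth;
    dst_state->may_goto_depth = src->may_goto_depth;
    dst_state->loop_entry = src->loop_entry;    /* previously missing */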

Structure of the patch set:
- Patch #1 fixes the bug but has a significant negative impact on
  verification performance for sched_ext programs.
- Patch #3 mitigates the verification performance impact of patch #1
  by avoiding clean_live_states() for states whose loop_entry is still
  being verified. This reduces the number of processed instructions
  for sched_ext programs by 28–92% in some cases.
- Patches #5-6 simplify {get,update}_loop_entry() logic (and are not
  strictly necessary).
- Patches #7–10 mitigate the memory overhead introduced by patch #1
  when a program with an iterator-based loop hits the 1M instruction
  limit. This is achieved by freeing states in env->free_list once
  their branches and used_as_loop_entry counts reach zero (see the
  sketch after this list).
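
The freeing rule in patches #7–10 is essentially a reference-count
check. A rough sketch, using a hypothetical helper name and the fields
visible in the header diff below, not the exact upstream code:

    /* A state parked on env->free_list may only be released once no
     * branch is still being explored from it and no other state points
     * to it through its loop_entry field.  Hypothetical helper; the
     * real series may structure this differently.
     */
    static void maybe_free_verifier_state(struct bpf_verifier_env *env,
                                          struct bpf_verifier_state_list *sl)
    {
            if (!sl->in_free_list)
                    return;
            if (sl->state.branches || sl->state.used_as_loop_entry)
                    return;
            list_del(&sl->node);
            free_verifier_state(&sl->state, false);
            kfree(sl);
            env->free_list_size--;
    }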

Patches #1-4 were previously sent as part of [1].

[1] https://lore.kernel.org/bpf/20250122120442.3536298-1-eddyz87@gmail.com/
====================

Link: https://patch.msgid.link/20250215110411.3236773-1-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov committed Feb 19, 2025
2 parents (0fc6025 + 574078b), commit 654765b
Showing 3 changed files with 296 additions and 97 deletions.
include/linux/bpf_verifier.h: 16 additions, 9 deletions
@@ -427,11 +427,6 @@ struct bpf_verifier_state {
 	bool active_rcu_lock;

 	bool speculative;
-	/* If this state was ever pointed-to by other state's loop_entry field
-	 * this flag would be set to true. Used to avoid freeing such states
-	 * while they are still in use.
-	 */
-	bool used_as_loop_entry;
 	bool in_sleepable;

 	/* first and last insn idx of this verifier state */
@@ -458,6 +453,11 @@ struct bpf_verifier_state {
 	u32 dfs_depth;
 	u32 callback_unroll_depth;
 	u32 may_goto_depth;
+	/* If this state was ever pointed-to by other state's loop_entry field
+	 * this flag would be set to true. Used to avoid freeing such states
+	 * while they are still in use.
+	 */
+	u32 used_as_loop_entry;
 };

 #define bpf_get_spilled_reg(slot, frame, mask) \
@@ -498,8 +498,10 @@ struct bpf_verifier_state {
 /* linked list of verifier states used to prune search */
 struct bpf_verifier_state_list {
 	struct bpf_verifier_state state;
-	struct bpf_verifier_state_list *next;
-	int miss_cnt, hit_cnt;
+	struct list_head node;
+	u32 miss_cnt;
+	u32 hit_cnt:31;
+	u32 in_free_list:1;
 };

 struct bpf_loop_inline_state {
@@ -710,8 +712,11 @@ struct bpf_verifier_env {
 	bool test_state_freq;		/* test verifier with different pruning frequency */
 	bool test_reg_invariants;	/* fail verification on register invariants violations */
 	struct bpf_verifier_state *cur_state; /* current verifier state */
-	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-	struct bpf_verifier_state_list *free_list;
+	/* Search pruning optimization, array of list_heads for
+	 * lists of struct bpf_verifier_state_list.
+	 */
+	struct list_head *explored_states;
+	struct list_head free_list;	/* list of struct bpf_verifier_state_list */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
 	u32 used_map_cnt;		/* number of used maps */
@@ -767,6 +772,8 @@ struct bpf_verifier_env {
 	u32 peak_states;
 	/* longest register parentage chain walked for liveness marking */
 	u32 longest_mark_read_walk;
+	u32 free_list_size;
+	u32 explored_states_size;
 	bpfptr_t fd_array;

 	/* bit mask to keep track of whether a register has been accessed
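
For context on the list conversion above: with explored_states buckets
and free_list turned into struct list_head, pruning lookups can walk a
bucket with the standard list iterators instead of following a
hand-rolled ->next chain. A hedged sketch; explored_state() is assumed
here to return the bucket head, and states_equal_stub() stands in for
the verifier's real state comparison:

    struct list_head *head = explored_state(env, insn_idx);
    struct bpf_verifier_state_list *sl, *tmp;

    /* _safe variant so an entry could be unlinked while iterating. */
    list_for_each_entry_safe(sl, tmp, head, node) {
            if (states_equal_stub(env, &sl->state, cur)) {
                    sl->hit_cnt++;	/* cached state matched, prune this path */
                    break;
            }
            sl->miss_cnt++;
    }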