bpf: use list_head to track explored states and free list
The next patch in the set needs the ability to remove individual
states from env->free_list while holding only a pointer to the state,
which requires env->free_list to be a doubly linked list.
This patch converts env->free_list and struct bpf_verifier_state_list
to use struct list_head for this purpose. The change to
env->explored_states is collateral.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250215110411.3236773-9-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Eduard Zingerman authored and Alexei Starovoitov committed Feb 19, 2025
1 parent 590eee4 commit 5564ee3
Showing 2 changed files with 42 additions and 41 deletions.
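
To make the motivation concrete, here is a minimal userspace sketch of the property the commit message relies on: with an intrusive doubly linked list in the style of the kernel's <linux/list.h>, an element can be unlinked in O(1) while holding only a pointer to the element itself, with no walk from the head and no **pprev bookkeeping. The list helpers and struct state_entry below are simplified, illustrative stand-ins, not the kernel's actual code.

/*
 * Minimal userspace sketch (not kernel code): simplified stand-ins for
 * the kernel's <linux/list.h> helpers; struct state_entry is a
 * hypothetical stand-in for struct bpf_verifier_state_list.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->prev = head->next = head;
}

/* Insert new_node right after head, kernel list_add() style. */
static void list_add(struct list_head *new_node, struct list_head *head)
{
	new_node->next = head->next;
	new_node->prev = head;
	head->next->prev = new_node;
	head->next = new_node;
}

/* Unlink node in O(1); needs only the node, since prev is stored in it. */
static void list_del(struct list_head *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
}

/* Recover the containing struct from the embedded list_head. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct state_entry {
	int id;
	struct list_head node;
};

int main(void)
{
	struct list_head free_list;
	struct state_entry a = { .id = 1 }, b = { .id = 2 };
	struct list_head *pos;

	INIT_LIST_HEAD(&free_list);
	list_add(&a.node, &free_list);
	list_add(&b.node, &free_list);

	/* Drop 'a' given only &a: with a singly linked list this needs
	 * either a walk from the head or a pointer to the previous link.
	 */
	list_del(&a.node);

	for (pos = free_list.next; pos != &free_list; pos = pos->next)
		printf("id=%d\n", container_of(pos, struct state_entry, node)->id);
	return 0;
}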
9 changes: 6 additions & 3 deletions include/linux/bpf_verifier.h
@@ -498,7 +498,7 @@ struct bpf_verifier_state {
 /* linked list of verifier states used to prune search */
 struct bpf_verifier_state_list {
 	struct bpf_verifier_state state;
-	struct bpf_verifier_state_list *next;
+	struct list_head node;
 	int miss_cnt, hit_cnt;
 };

@@ -710,8 +710,11 @@ struct bpf_verifier_env {
 	bool test_state_freq; /* test verifier with different pruning frequency */
 	bool test_reg_invariants; /* fail verification on register invariants violations */
 	struct bpf_verifier_state *cur_state; /* current verifier state */
-	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-	struct bpf_verifier_state_list *free_list;
+	/* Search pruning optimization, array of list_heads for
+	 * lists of struct bpf_verifier_state_list.
+	 */
+	struct list_head *explored_states;
+	struct list_head free_list; /* list of struct bpf_verifier_state_list */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
 	u32 used_map_cnt; /* number of used maps */
74 changes: 36 additions & 38 deletions kernel/bpf/verifier.c
@@ -1691,7 +1691,7 @@ static u32 state_htab_size(struct bpf_verifier_env *env)
 	return env->prog->len;
 }
 
-static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx)
+static struct list_head *explored_state(struct bpf_verifier_env *env, int idx)
 {
 	struct bpf_verifier_state *cur = env->cur_state;
 	struct bpf_func_state *state = cur->frame[cur->curframe];
@@ -8443,10 +8443,12 @@ static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
 {
 	struct bpf_verifier_state_list *sl;
 	struct bpf_verifier_state *st;
+	struct list_head *pos, *head;
 
 	/* Explored states are pushed in stack order, most recent states come first */
-	sl = *explored_state(env, insn_idx);
-	for (; sl; sl = sl->next) {
+	head = explored_state(env, insn_idx);
+	list_for_each(pos, head) {
+		sl = container_of(pos, struct bpf_verifier_state_list, node);
 		/* If st->branches != 0 state is a part of current DFS verification path,
 		 * hence cur & st for a loop.
 		 */
@@ -17857,20 +17859,20 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
 {
 	struct bpf_verifier_state *loop_entry;
 	struct bpf_verifier_state_list *sl;
+	struct list_head *pos, *head;
 
-	sl = *explored_state(env, insn);
-	while (sl) {
+	head = explored_state(env, insn);
+	list_for_each(pos, head) {
+		sl = container_of(pos, struct bpf_verifier_state_list, node);
 		if (sl->state.branches)
-			goto next;
+			continue;
 		loop_entry = get_loop_entry(env, &sl->state);
 		if (!IS_ERR_OR_NULL(loop_entry) && loop_entry->branches)
-			goto next;
+			continue;
 		if (sl->state.insn_idx != insn ||
 		    !same_callsites(&sl->state, cur))
-			goto next;
+			continue;
 		clean_verifier_state(env, &sl->state);
-next:
-		sl = sl->next;
 	}
 }

@@ -18561,10 +18563,11 @@ static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf
 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 {
 	struct bpf_verifier_state_list *new_sl;
-	struct bpf_verifier_state_list *sl, **pprev;
+	struct bpf_verifier_state_list *sl;
 	struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry;
 	int i, j, n, err, states_cnt = 0;
 	bool force_new_state, add_new_state, force_exact;
+	struct list_head *pos, *tmp, *head;
 
 	force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) ||
 		/* Avoid accumulating infinitely long jmp history */
@@ -18583,15 +18586,14 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	    env->insn_processed - env->prev_insn_processed >= 8)
 		add_new_state = true;
 
-	pprev = explored_state(env, insn_idx);
-	sl = *pprev;
-
 	clean_live_states(env, insn_idx, cur);
 
-	while (sl) {
+	head = explored_state(env, insn_idx);
+	list_for_each_safe(pos, tmp, head) {
+		sl = container_of(pos, struct bpf_verifier_state_list, node);
 		states_cnt++;
 		if (sl->state.insn_idx != insn_idx)
-			goto next;
+			continue;
 
 		if (sl->state.branches) {
 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
@@ -18796,7 +18798,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 			/* the state is unlikely to be useful. Remove it to
 			 * speed up verification
 			 */
-			*pprev = sl->next;
+			list_del(&sl->node);
 			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE &&
 			    !sl->state.used_as_loop_entry) {
 				u32 br = sl->state.branches;
@@ -18812,15 +18814,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 				 * walk it later. Add it for free_list instead to
 				 * be freed at the end of verification
 				 */
-				sl->next = env->free_list;
-				env->free_list = sl;
+				list_add(&sl->node, &env->free_list);
 			}
-			sl = *pprev;
-			continue;
 		}
-next:
-		pprev = &sl->next;
-		sl = *pprev;
 	}
 
 	if (env->max_states_per_insn < states_cnt)
@@ -18869,8 +18865,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	cur->first_insn_idx = insn_idx;
 	cur->insn_hist_start = cur->insn_hist_end;
 	cur->dfs_depth = new->dfs_depth + 1;
-	new_sl->next = *explored_state(env, insn_idx);
-	*explored_state(env, insn_idx) = new_sl;
+	list_add(&new_sl->node, head);
 
 	/* connect new state to parentage chain. Current frame needs all
 	 * registers connected. Only r6 - r9 of the callers are alive (pushed
 	 * to the stack implicitly by JITs) so in callers' frames connect just
@@ -22194,31 +22190,29 @@ static int remove_fastcall_spills_fills(struct bpf_verifier_env *env)
 
 static void free_states(struct bpf_verifier_env *env)
 {
-	struct bpf_verifier_state_list *sl, *sln;
+	struct bpf_verifier_state_list *sl;
+	struct list_head *head, *pos, *tmp;
 	int i;
 
-	sl = env->free_list;
-	while (sl) {
-		sln = sl->next;
+	list_for_each_safe(pos, tmp, &env->free_list) {
+		sl = container_of(pos, struct bpf_verifier_state_list, node);
 		free_verifier_state(&sl->state, false);
 		kfree(sl);
-		sl = sln;
 	}
-	env->free_list = NULL;
+	INIT_LIST_HEAD(&env->free_list);
 
 	if (!env->explored_states)
 		return;
 
 	for (i = 0; i < state_htab_size(env); i++) {
-		sl = env->explored_states[i];
+		head = &env->explored_states[i];
 
-		while (sl) {
-			sln = sl->next;
+		list_for_each_safe(pos, tmp, head) {
+			sl = container_of(pos, struct bpf_verifier_state_list, node);
 			free_verifier_state(&sl->state, false);
 			kfree(sl);
-			sl = sln;
 		}
-		env->explored_states[i] = NULL;
+		INIT_LIST_HEAD(&env->explored_states[i]);
 	}
 }

@@ -23186,12 +23180,16 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
 
 	env->explored_states = kvcalloc(state_htab_size(env),
-					sizeof(struct bpf_verifier_state_list *),
+					sizeof(struct list_head),
 					GFP_USER);
 	ret = -ENOMEM;
 	if (!env->explored_states)
 		goto skip_full_check;
 
+	for (i = 0; i < state_htab_size(env); i++)
+		INIT_LIST_HEAD(&env->explored_states[i]);
+	INIT_LIST_HEAD(&env->free_list);
+
 	ret = check_btf_info_early(env, attr, uattr);
 	if (ret < 0)
 		goto skip_full_check;
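
A note on the iteration helpers chosen above: is_state_visited() and free_states() unlink entries while walking a bucket, so the patch uses list_for_each_safe(), which caches the successor before the loop body runs; plain list_for_each() (used in find_prev_entry() and clean_live_states(), which never delete) would chase a pointer out of an already unlinked node. Below is a rough sketch of the distinction, reusing the simplified helpers and struct state_entry from the example near the top; drain() and its heap-allocated entries are hypothetical, not the verifier's code.

#include <stdlib.h>

/* Simplified stand-in for the kernel's list_for_each_safe(): cache the
 * successor in tmp so the body may unlink or free pos safely.
 */
#define list_for_each_safe(pos, tmp, head) \
	for ((pos) = (head)->next, (tmp) = (pos)->next; \
	     (pos) != (head); \
	     (pos) = (tmp), (tmp) = (pos)->next)

/* Free every entry of a list, mirroring the free_states() pattern;
 * assumes the entries were heap-allocated.
 */
static void drain(struct list_head *head)
{
	struct list_head *pos, *tmp;

	list_for_each_safe(pos, tmp, head) {
		list_del(pos);
		free(container_of(pos, struct state_entry, node));
	}
	INIT_LIST_HEAD(head);	/* leave the list reusable, as free_states() does */
}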
