Merge branch 'introduce dummy BPF STRUCT_OPS'
Hou Tao says:

====================

Hi,

Currently the testing of BPF STRUCT_OPS depends on a specific bpf
struct_ops implementation (e.g., tcp_congestion_ops), but it cannot
cover all basic functionalities (e.g., return value handling), so
introduce a dummy BPF STRUCT_OPS for testing purposes.

Instead of loading a userspace-implemented bpf_dummy_ops map into the
kernel and calling the specific functions by writing to a sysfs file
provided by bpf_testmod.ko, only the bpf_dummy_ops related progs are
loaded into the kernel and these progs are invoked through
bpf_prog_test_run(). The latter is more flexible and has no dependency
on an extra kernel module.

Currently, return value handling is covered by the test_1(...) op and
passing multiple arguments is covered by the test_2(...) op. If more
are needed, test_x(...) ops can be added afterwards.
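
As a rough illustration (section and map names here are illustrative;
the actual selftest programs are added by a later patch in this series
and are not shown in the diff below), BPF-side implementations of the
two ops could look like:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/test_1")
int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
{
	/* the prog's return value is reported back via bpf_prog_test_run() */
	if (!state)
		return 0xf2f3f4f5;
	state->val = 0x5a;
	return state->val;
}

SEC("struct_ops/test_2")
int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1,
	     unsigned short a2, char a3, unsigned long a4)
{
	/* fold the scalar args into state so user space can check them */
	if (state)
		state->val = a1 + a2 + a3 + a4;
	return 0;
}

SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)test_1,
	.test_2 = (void *)test_2,
};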

Comments are always welcome.
Regards,
Hou

Change Log:
v4:
 * add Acked-by tags in patches 1~4
 * patch 2: remove unnecessary comments and update the commit message
            accordingly
 * patch 4: remove unnecessary nr checking in dummy_ops_init_args()

v3: https://www.spinics.net/lists/bpf/msg48303.html
 * rebase on bpf-next
 * address comments from Martin, mainly: merge patches 3 & 4 from v2,
   fix the names of the btf ctx access check helpers, handle CONFIG_NET,
   fix a leak in dummy_ops_init_args(), and simplify bpf_dummy_init()
 * patch 4: use a loop to check args in test_dummy_multiple_args()

v2: https://www.spinics.net/lists/bpf/msg47948.html
 * rebase on bpf-next
 * add test_2(...) ops to test the passing of multiple arguments
 * a new patch (patch #2) is added to factor out ctx access helpers
 * address comments from Martin & Andrii

v1: https://www.spinics.net/lists/bpf/msg46787.html

RFC: https://www.spinics.net/lists/bpf/msg46117.html
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov committed Nov 1, 2021
2 parents 36e70b9 + 31122b2 commit f27a6fa
Showing 9 changed files with 439 additions and 32 deletions.
43 changes: 43 additions & 0 deletions include/linux/bpf.h
@@ -1000,6 +1000,10 @@ bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
struct bpf_prog *prog,
const struct btf_func_model *model,
void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
@@ -1014,6 +1018,22 @@ static inline void bpf_module_put(const void *data, struct module *owner)
else
module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
int val;
};

struct bpf_dummy_ops {
int (*test_1)(struct bpf_dummy_ops_state *cb);
int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
@@ -1646,6 +1666,29 @@ bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);

static inline bool bpf_tracing_ctx_access(int off, int size,
enum bpf_access_type type)
{
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
return true;
}

static inline bool bpf_tracing_btf_ctx_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (!bpf_tracing_ctx_access(off, size, type))
return false;
return btf_ctx_access(off, size, type, prog, info);
}

int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
32 changes: 22 additions & 10 deletions kernel/bpf/bpf_struct_ops.c
@@ -93,6 +93,9 @@ const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
.test_run = bpf_struct_ops_test_run,
#endif
};

static const struct btf_type *module_type;
@@ -312,6 +315,20 @@ static int check_zero_holes(const struct btf_type *t, void *data)
return 0;
}

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
struct bpf_prog *prog,
const struct btf_func_model *model,
void *image, void *image_end)
{
u32 flags;

tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
return arch_prepare_bpf_trampoline(NULL, image, image_end,
model, flags, tprogs, NULL);
}

static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
void *value, u64 flags)
{
@@ -323,7 +340,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
struct bpf_tramp_progs *tprogs = NULL;
void *udata, *kdata;
int prog_fd, err = 0;
void *image;
void *image, *image_end;
u32 i;

if (flags)
@@ -363,12 +380,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
udata = &uvalue->data;
kdata = &kvalue->data;
image = st_map->image;
image_end = st_map->image + PAGE_SIZE;

for_each_member(i, t, member) {
const struct btf_type *mtype, *ptype;
struct bpf_prog *prog;
u32 moff;
u32 flags;

moff = btf_member_bit_offset(t, member) / 8;
ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@@ -430,14 +447,9 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
goto reset_unlock;
}

tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
flags = st_ops->func_models[i].ret_size > 0 ?
BPF_TRAMP_F_RET_FENTRY_RET : 0;
err = arch_prepare_bpf_trampoline(NULL, image,
st_map->image + PAGE_SIZE,
&st_ops->func_models[i],
flags, tprogs, NULL);
err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
&st_ops->func_models[i],
image, image_end);
if (err < 0)
goto reset_unlock;

3 changes: 3 additions & 0 deletions kernel/bpf/bpf_struct_ops_types.h
@@ -2,6 +2,9 @@
/* internal file - do not include directly */

#ifdef CONFIG_BPF_JIT
#ifdef CONFIG_NET
BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
#endif
#ifdef CONFIG_INET
#include <net/tcp.h>
BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
16 changes: 2 additions & 14 deletions kernel/trace/bpf_trace.c
@@ -1646,27 +1646,15 @@ static bool raw_tp_prog_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
return true;
return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
return btf_ctx_access(off, size, type, prog, info);
return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
3 changes: 3 additions & 0 deletions net/bpf/Makefile
@@ -1,2 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BPF_SYSCALL) := test_run.o
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_dummy_struct_ops.o
endif
200 changes: 200 additions & 0 deletions net/bpf/bpf_dummy_struct_ops.c
@@ -0,0 +1,200 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021. Huawei Technologies Co., Ltd
*/
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>

extern struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

struct bpf_dummy_ops_test_args {
u64 args[MAX_BPF_FUNC_ARGS];
struct bpf_dummy_ops_state state;
};

static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
__u32 size_in;
struct bpf_dummy_ops_test_args *args;
void __user *ctx_in;
void __user *u_state;

size_in = kattr->test.ctx_size_in;
if (size_in != sizeof(u64) * nr)
return ERR_PTR(-EINVAL);

args = kzalloc(sizeof(*args), GFP_KERNEL);
if (!args)
return ERR_PTR(-ENOMEM);

ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
if (copy_from_user(args->args, ctx_in, size_in))
goto out;

/* args[0] is 0 means state argument of test_N will be NULL */
u_state = u64_to_user_ptr(args->args[0]);
if (u_state && copy_from_user(&args->state, u_state,
sizeof(args->state)))
goto out;

return args;
out:
kfree(args);
return ERR_PTR(-EFAULT);
}

static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
void __user *u_state;

u_state = u64_to_user_ptr(args->args[0]);
if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
return -EFAULT;

return 0;
}

static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
dummy_ops_test_ret_fn test = (void *)image;
struct bpf_dummy_ops_state *state = NULL;

/* state needs to be NULL if args[0] is 0 */
if (args->args[0])
state = &args->state;
return test(state, args->args[1], args->args[2],
args->args[3], args->args[4]);
}

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
const struct btf_type *func_proto;
struct bpf_dummy_ops_test_args *args;
struct bpf_tramp_progs *tprogs;
void *image = NULL;
unsigned int op_idx;
int prog_ret;
int err;

if (prog->aux->attach_btf_id != st_ops->type_id)
return -EOPNOTSUPP;

func_proto = prog->aux->attach_func_proto;
args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
if (IS_ERR(args))
return PTR_ERR(args);

tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
if (!tprogs) {
err = -ENOMEM;
goto out;
}

image = bpf_jit_alloc_exec(PAGE_SIZE);
if (!image) {
err = -ENOMEM;
goto out;
}
set_vm_flush_reset_perms(image);

op_idx = prog->expected_attach_type;
err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
&st_ops->func_models[op_idx],
image, image + PAGE_SIZE);
if (err < 0)
goto out;

set_memory_ro((long)image, 1);
set_memory_x((long)image, 1);
prog_ret = dummy_ops_call_op(image, args);

err = dummy_ops_copy_args(args);
if (err)
goto out;
if (put_user(prog_ret, &uattr->test.retval))
err = -EFAULT;
out:
kfree(args);
bpf_jit_free_exec(image);
kfree(tprogs);
return err;
}

static int bpf_dummy_init(struct btf *btf)
{
return 0;
}

static bool bpf_dummy_ops_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
const struct btf *btf,
const struct btf_type *t, int off,
int size, enum bpf_access_type atype,
u32 *next_btf_id)
{
const struct btf_type *state;
s32 type_id;
int err;

type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state",
BTF_KIND_STRUCT);
if (type_id < 0)
return -EINVAL;

state = btf_type_by_id(btf, type_id);
if (t != state) {
bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
return -EACCES;
}

err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
if (err < 0)
return err;

return atype == BPF_READ ? err : NOT_INIT;
}

static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
.is_valid_access = bpf_dummy_ops_is_valid_access,
.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

static int bpf_dummy_init_member(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata)
{
return -EOPNOTSUPP;
}

static int bpf_dummy_reg(void *kdata)
{
return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}

struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
.init_member = bpf_dummy_init_member,
.reg = bpf_dummy_reg,
.unreg = bpf_dummy_unreg,
.name = "bpf_dummy_ops",
};
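
For reference, with the API above a user-space test can invoke one of
these progs directly through BPF_PROG_TEST_RUN: ctx_in carries one u64
per argument of the op, args[0] holds a user-space pointer to a struct
bpf_dummy_ops_state (or 0 for a NULL state) that is copied in before the
call and copied back afterwards, and the prog's return value is reported
in attr.test.retval. A minimal sketch, assuming prog_fd was obtained
elsewhere (e.g. from a libbpf skeleton); the helper name below is made up:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

struct bpf_dummy_ops_state { int val; };	/* mirrors the kernel definition */

static int run_dummy_test_1(int prog_fd, struct bpf_dummy_ops_state *state,
			    int *prog_ret)
{
	union bpf_attr attr;
	__u64 args[1] = { (unsigned long)state };	/* test_1 takes one argument */
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (unsigned long)args;
	attr.test.ctx_size_in = sizeof(args);		/* must be 8 * vlen(test_1) */

	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (!err)
		*prog_ret = attr.test.retval;		/* BPF prog's return value */
	return err;
}
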
9 changes: 1 addition & 8 deletions net/ipv4/bpf_tcp_ca.c
@@ -81,14 +81,7 @@ static bool bpf_tcp_ca_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;

if (!btf_ctx_access(off, size, type, prog, info))
if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
return false;

if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
