diff --git a/include/net/tcp.h b/include/net/tcp.h
index 14fdd7ce9992b..8a61c3e8c15df 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2051,11 +2051,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
 #define TCP_ULP_MAX		128
 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
 
-enum {
-	TCP_ULP_TLS,
-	TCP_ULP_BPF,
-};
-
 struct tcp_ulp_ops {
 	struct list_head	list;
 
@@ -2064,9 +2059,7 @@ struct tcp_ulp_ops {
 	/* cleanup ulp */
 	void (*release)(struct sock *sk);
 
-	int		uid;
 	char		name[TCP_ULP_NAME_MAX];
-	bool		user_visible;
 	struct module	*owner;
 };
 int tcp_register_ulp(struct tcp_ulp_ops *type);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 7d6d9cf9ebd52..98fa0be35370b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1528,14 +1528,19 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
 	return reg->type != SCALAR_VALUE;
 }
 
+static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+{
+	return cur_regs(env) + regno;
+}
+
 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
 {
-	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
+	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
 }
 
 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 {
-	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+	const struct bpf_reg_state *reg = reg_state(env, regno);
 
 	return reg->type == PTR_TO_CTX ||
 	       reg->type == PTR_TO_SOCKET;
@@ -1543,11 +1548,19 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 
 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
 {
-	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+	const struct bpf_reg_state *reg = reg_state(env, regno);
 
 	return type_is_pkt_pointer(reg->type);
 }
 
+static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = reg_state(env, regno);
+
+	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
+	return reg->type == PTR_TO_FLOW_KEYS;
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg,
				   int off, int size, bool strict)
@@ -1956,9 +1969,11 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 	}
 
 	if (is_ctx_reg(env, insn->dst_reg) ||
-	    is_pkt_reg(env, insn->dst_reg)) {
+	    is_pkt_reg(env, insn->dst_reg) ||
+	    is_flow_key_reg(env, insn->dst_reg)) {
 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
-			insn->dst_reg, reg_type_str[insn->dst_reg]);
+			insn->dst_reg,
+			reg_type_str[reg_state(env, insn->dst_reg)->type]);
 		return -EACCES;
 	}
 
@@ -1983,7 +1998,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
 {
-	struct bpf_reg_state *reg = cur_regs(env) + regno;
+	struct bpf_reg_state *reg = reg_state(env, regno);
 	struct bpf_func_state *state = func(env, reg);
 	int off, i, slot, spi;
 
@@ -2062,8 +2077,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 	case PTR_TO_PACKET_META:
 		return check_packet_access(env, regno, reg->off, access_size,
					   zero_size_allowed);
-	case PTR_TO_FLOW_KEYS:
-		return check_flow_keys_access(env, reg->off, access_size);
 	case PTR_TO_MAP_VALUE:
 		return check_map_access(env, regno, reg->off, access_size,
					zero_size_allowed);
@@ -5264,7 +5277,8 @@ static int do_check(struct bpf_verifier_env *env)
 
 			if (is_ctx_reg(env, insn->dst_reg)) {
				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
-					insn->dst_reg, reg_type_str[insn->dst_reg]);
+					insn->dst_reg,
+					reg_type_str[reg_state(env, insn->dst_reg)->type]);
				return -EACCES;
			}
 
@@ -6164,7 +6178,10 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    (insn->imm == BPF_FUNC_map_lookup_elem ||
		     insn->imm == BPF_FUNC_map_update_elem ||
-		     insn->imm == BPF_FUNC_map_delete_elem)) {
+		     insn->imm == BPF_FUNC_map_delete_elem ||
+		     insn->imm == BPF_FUNC_map_push_elem ||
+		     insn->imm == BPF_FUNC_map_pop_elem ||
+		     insn->imm == BPF_FUNC_map_peek_elem)) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;
@@ -6197,6 +6214,14 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
				     (int (*)(struct bpf_map *map, void *key, void *value,
					      u64 flags))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
+				     (int (*)(struct bpf_map *map, void *value,
+					      u64 flags))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
+				     (int (*)(struct bpf_map *map, void *value))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
+				     (int (*)(struct bpf_map *map, void *value))NULL));
+
			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
@@ -6210,6 +6235,18 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
					    __bpf_call_base;
				continue;
+			case BPF_FUNC_map_push_elem:
+				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
+					    __bpf_call_base;
+				continue;
+			case BPF_FUNC_map_pop_elem:
+				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
+					    __bpf_call_base;
+				continue;
+			case BPF_FUNC_map_peek_elem:
+				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
+					    __bpf_call_base;
+				continue;
			}
 
			goto patch_call_imm;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e90b6d5370774..311cec8e533de 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -715,8 +715,6 @@ EXPORT_SYMBOL(tls_unregister_device);
 
 static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
 	.name			= "tls",
-	.uid			= TCP_ULP_TLS,
-	.user_visible		= true,
 	.owner			= THIS_MODULE,
 	.init			= tls_init,
 };
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 6d41323be291b..bdaf4062e26e0 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -50,15 +50,17 @@ static void int_exit(int signo)
 	stop = true;
 }
 
-static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
+static enum bpf_perf_event_ret
+print_bpf_output(struct perf_event_header *event, void *private_data)
 {
-	struct event_ring_info *ring = priv;
-	struct perf_event_sample *e = event;
+	struct perf_event_sample *e = container_of(event, struct perf_event_sample,
+						   header);
+	struct event_ring_info *ring = private_data;
 	struct {
		struct perf_event_header header;
		__u64 id;
		__u64 lost;
-	} *lost = event;
+	} *lost = (typeof(lost))event;
 
 	if (json_output) {
		jsonw_start_object(json_wtr);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 0c21355f04a77..b607be7236d3e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2415,56 +2415,47 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 }
 
 enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mem, unsigned long size,
-			   unsigned long page_size, void **buf, size_t *buf_len,
-			   bpf_perf_event_print_t fn, void *priv)
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+			   void **copy_mem, size_t *copy_size,
+			   bpf_perf_event_print_t fn, void *private_data)
 {
-	struct perf_event_mmap_page *header = mem;
+	struct perf_event_mmap_page *header = mmap_mem;
 	__u64 data_head = ring_buffer_read_head(header);
 	__u64 data_tail = header->data_tail;
-	int ret = LIBBPF_PERF_EVENT_ERROR;
-	void *base, *begin, *end;
-
-	if (data_head == data_tail)
-		return LIBBPF_PERF_EVENT_CONT;
-
-	base = ((char *)header) + page_size;
-
-	begin = base + data_tail % size;
-	end = base + data_head % size;
-
-	while (begin != end) {
-		struct perf_event_header *ehdr;
-
-		ehdr = begin;
-		if (begin + ehdr->size > base + size) {
-			long len = base + size - begin;
-
-			if (*buf_len < ehdr->size) {
-				free(*buf);
-				*buf = malloc(ehdr->size);
-				if (!*buf) {
+	void *base = ((__u8 *)header) + page_size;
+	int ret = LIBBPF_PERF_EVENT_CONT;
+	struct perf_event_header *ehdr;
+	size_t ehdr_size;
+
+	while (data_head != data_tail) {
+		ehdr = base + (data_tail & (mmap_size - 1));
+		ehdr_size = ehdr->size;
+
+		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
+			void *copy_start = ehdr;
+			size_t len_first = base + mmap_size - copy_start;
+			size_t len_secnd = ehdr_size - len_first;
+
+			if (*copy_size < ehdr_size) {
+				free(*copy_mem);
+				*copy_mem = malloc(ehdr_size);
+				if (!*copy_mem) {
+					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
-				*buf_len = ehdr->size;
+				*copy_size = ehdr_size;
			}
 
-			memcpy(*buf, begin, len);
-			memcpy(*buf + len, base, ehdr->size - len);
-			ehdr = (void *)*buf;
-			begin = base + ehdr->size - len;
-		} else if (begin + ehdr->size == base + size) {
-			begin = base;
-		} else {
-			begin += ehdr->size;
+			memcpy(*copy_mem, copy_start, len_first);
+			memcpy(*copy_mem + len_first, base, len_secnd);
+			ehdr = *copy_mem;
		}
 
-		ret = fn(ehdr, priv);
+		ret = fn(ehdr, private_data);
+		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
-
-		data_tail += ehdr->size;
	}
 
	ring_buffer_write_tail(header, data_tail);
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 1354cc9f8cbae..1f3468dad8b2c 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -297,13 +297,14 @@ enum bpf_perf_event_ret {
 	LIBBPF_PERF_EVENT_CONT = -2,
 };
 
-typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(void *event,
-							   void *priv);
-LIBBPF_API int bpf_perf_event_read_simple(void *mem, unsigned long size,
-					  unsigned long page_size,
-					  void **buf, size_t *buf_len,
-					  bpf_perf_event_print_t fn,
-					  void *priv);
+struct perf_event_header;
+typedef enum bpf_perf_event_ret
+	(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
+				  void *private_data);
+LIBBPF_API enum bpf_perf_event_ret
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+			   void **copy_mem, size_t *copy_size,
+			   bpf_perf_event_print_t fn, void *private_data);
 
 struct nlattr;
 typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index f1ae8d09770fc..769d68a48f303 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -3430,7 +3430,7 @@ static struct bpf_test tests[] = {
			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
			BPF_EXIT_INSN(),
		},
-		.errstr = "BPF_ST stores into R1 inv is not allowed",
+		.errstr = "BPF_ST stores into R1 ctx is not allowed",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
@@ -3442,7 +3442,7 @@ static struct bpf_test tests[] = {
				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
			BPF_EXIT_INSN(),
		},
-		.errstr = "BPF_XADD stores into R1 inv is not allowed",
+		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
@@ -5670,7 +5670,7 @@ static struct bpf_test tests[] = {
		.errstr_unpriv = "R2 leaks addr into mem",
		.result_unpriv = REJECT,
		.result = REJECT,
-		.errstr = "BPF_XADD stores into R1 inv is not allowed",
+		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
	},
	{
		"leak pointer into ctx 2",
@@ -5685,7 +5685,7 @@ static struct bpf_test tests[] = {
		.errstr_unpriv = "R10 leaks addr into mem",
		.result_unpriv = REJECT,
		.result = REJECT,
-		.errstr = "BPF_XADD stores into R1 inv is not allowed",
+		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
	},
	{
		"leak pointer into ctx 3",
@@ -12634,7 +12634,7 @@ static struct bpf_test tests[] = {
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
-		.errstr = "BPF_XADD stores into R2 ctx",
+		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index a3d1dace4a756..4cdb63bf0521d 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -125,10 +125,11 @@ struct perf_event_sample {
	char data[];
 };
 
-static enum bpf_perf_event_ret bpf_perf_event_print(void *event, void *priv)
+static enum bpf_perf_event_ret
+bpf_perf_event_print(struct perf_event_header *hdr, void *private_data)
 {
-	struct perf_event_sample *e = event;
-	perf_event_print_fn fn = priv;
+	struct perf_event_sample *e = (struct perf_event_sample *)hdr;
+	perf_event_print_fn fn = private_data;
	int ret;
 
	if (e->header.type == PERF_RECORD_SAMPLE) {
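
Note on the changed libbpf contract above: bpf_perf_event_read_simple() now takes size_t sizes, advances data_tail itself after each callback, and the print callback receives a typed struct perf_event_header * plus a caller cookie instead of two void pointers. The sketch below is not part of the patch; it only illustrates how a caller could be written against the new signature. The names print_event and drain_ring and the mmap() setup are hypothetical, and it assumes the ring's data area (mmap_size) is a power of two, which the new data_tail & (mmap_size - 1) indexing in libbpf.c relies on.

#include <stdio.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

/* New callback shape: typed record header plus a caller-owned cookie. */
static enum bpf_perf_event_ret print_event(struct perf_event_header *hdr,
					   void *private_data)
{
	unsigned long *nr_samples = private_data;

	if (hdr->type == PERF_RECORD_SAMPLE)
		(*nr_samples)++;
	else if (hdr->type == PERF_RECORD_LOST)
		fprintf(stderr, "lost records\n");

	return LIBBPF_PERF_EVENT_CONT;	/* keep walking the ring */
}

/*
 * mem points at the mmap()ed ring: one struct perf_event_mmap_page header
 * page followed by mmap_size bytes of data.
 */
static int drain_ring(void *mem, size_t mmap_size, size_t page_size)
{
	static void *copy_mem;		/* bounce buffer for wrapped records */
	static size_t copy_size;	/* grown by libbpf only when needed  */
	unsigned long nr_samples = 0;
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(mem, mmap_size, page_size,
					 &copy_mem, &copy_size,
					 print_event, &nr_samples);
	if (ret == LIBBPF_PERF_EVENT_ERROR)
		return -1;

	printf("consumed %lu samples\n", nr_samples);
	return 0;
}

Keeping copy_mem/copy_size alive across calls means libbpf reallocates the bounce buffer only when a wrapped record is larger than anything seen before, rather than on every poll.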