Skip to content

Commit

Permalink
selftests/bpf: trace_helpers.c: Optimize kallsyms cache
Browse files Browse the repository at this point in the history
The static ksyms array often causes problems because the number of kernel
symbols can exceed the MAX_SYMS limit. Bumping MAX_SYMS from 300000 to
400000, as done in commit e76a014 ("selftests/bpf: Bump and validate
MAX_SYMS"), mitigates the problem somewhat, but it is not a complete
solution.

This commit uses dynamic memory allocation, which completely solves the
problem caused by the limitation of the number of kallsyms. At the same
time, add APIs:

    load_kallsyms_local()
    ksym_search_local()
    ksym_get_addr_local()
    free_kallsyms_local()

These are used to solve the problem of selftests/bpf needing to refresh
kallsyms after attaching new symbols during testmod testing.

Signed-off-by: Rong Tao <rongtao@cestc.cn>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/tencent_C9BDA68F9221F21BE4081566A55D66A9700A@qq.com
  • Loading branch information
Rong Tao authored and Andrii Nakryiko committed Sep 8, 2023
1 parent 9bc8692 commit c698eae
Show file tree
Hide file tree
Showing 5 changed files with 118 additions and 46 deletions.
4 changes: 4 additions & 0 deletions samples/bpf/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,7 @@ TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
TPROGS_CFLAGS += -I$(srctree)/tools/include
TPROGS_CFLAGS += -I$(srctree)/tools/perf
TPROGS_CFLAGS += -I$(srctree)/tools/lib
TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0

ifdef SYSROOT
Expand Down Expand Up @@ -314,6 +315,9 @@ XDP_SAMPLE_CFLAGS += -Wall -O2 \

$(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS)
$(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h
# Override includes for trace_helpers.o because __must_check won't be defined
# in our include path.
$(obj)/$(TRACE_HELPERS): TPROGS_CFLAGS := $(TPROGS_CFLAGS) -D__must_check=

-include $(BPF_SAMPLES_PATH)/Makefile.target

Expand Down
2 changes: 1 addition & 1 deletion tools/testing/selftests/bpf/prog_tests/fill_link_info.c
Original file line number Diff line number Diff line change
Expand Up @@ -308,7 +308,7 @@ void test_fill_link_info(void)
return;

/* load kallsyms to compare the addr */
if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
goto cleanup;

kprobe_addr = ksym_get_addr(KPROBE_FUNC);
Expand Down
20 changes: 13 additions & 7 deletions tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
#include "trace_helpers.h"
#include "bpf/libbpf_internal.h"

static struct ksyms *ksyms;

static void kprobe_multi_testmod_check(struct kprobe_multi *skel)
{
ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result");
Expand Down Expand Up @@ -50,12 +52,12 @@ static void test_testmod_attach_api_addrs(void)
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
unsigned long long addrs[3];

addrs[0] = ksym_get_addr("bpf_testmod_fentry_test1");
ASSERT_NEQ(addrs[0], 0, "ksym_get_addr");
addrs[1] = ksym_get_addr("bpf_testmod_fentry_test2");
ASSERT_NEQ(addrs[1], 0, "ksym_get_addr");
addrs[2] = ksym_get_addr("bpf_testmod_fentry_test3");
ASSERT_NEQ(addrs[2], 0, "ksym_get_addr");
addrs[0] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test1");
ASSERT_NEQ(addrs[0], 0, "ksym_get_addr_local");
addrs[1] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test2");
ASSERT_NEQ(addrs[1], 0, "ksym_get_addr_local");
addrs[2] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test3");
ASSERT_NEQ(addrs[2], 0, "ksym_get_addr_local");

opts.addrs = (const unsigned long *) addrs;
opts.cnt = ARRAY_SIZE(addrs);
Expand All @@ -79,11 +81,15 @@ static void test_testmod_attach_api_syms(void)

/* Entry point for the kprobe_multi testmod serial test: loads a private
 * kallsyms snapshot (taken *after* bpf_testmod is loaded, so its symbols
 * are visible), runs the attach-by-syms and attach-by-addrs subtests,
 * then releases the snapshot.
 */
void serial_test_kprobe_multi_testmod_test(void)
{
	ksyms = load_kallsyms_local();
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
		return;

	if (test__start_subtest("testmod_attach_api_syms"))
		test_testmod_attach_api_syms();

	if (test__start_subtest("testmod_attach_api_addrs"))
		test_testmod_attach_api_addrs();

	free_kallsyms_local(ksyms);
}
130 changes: 94 additions & 36 deletions tools/testing/selftests/bpf/trace_helpers.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,104 +14,162 @@
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
#include "bpf/libbpf_internal.h"

#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"

/* Dynamically grown table of kernel symbols parsed from /proc/kallsyms,
 * sorted by address once fully loaded. sym_cap is the allocated capacity
 * in entries (grown via libbpf_ensure_mem()); sym_cnt is the number of
 * valid entries. Replaces the old fixed MAX_SYMS static array.
 */
struct ksyms {
	struct ksym *syms;
	size_t sym_cap;
	size_t sym_cnt;
};

/* File-global cache backing the legacy non-_local wrappers
 * (load_kallsyms(), ksym_search(), ksym_get_addr()); loaded once on
 * demand and never freed.
 */
static struct ksyms *ksyms;

static int ksyms__add_symbol(struct ksyms *ksyms, const char *name,
unsigned long addr)
{
void *tmp;

tmp = strdup(name);
if (!tmp)
return -ENOMEM;
ksyms->syms[ksyms->sym_cnt].addr = addr;
ksyms->syms[ksyms->sym_cnt].name = tmp;
ksyms->sym_cnt++;
return 0;
}

/* Release a symbol table returned by load_kallsyms_local().
 * Frees every strdup()'d name, the entry array, and the container
 * itself. Safe to call with NULL or with a partially constructed
 * table (syms == NULL), as happens on load_kallsyms_local() error
 * paths.
 */
void free_kallsyms_local(struct ksyms *ksyms)
{
	unsigned int i;

	if (!ksyms)
		return;

	if (ksyms->syms) {
		for (i = 0; i < ksyms->sym_cnt; i++)
			free(ksyms->syms[i].name);
		free(ksyms->syms);
	}

	free(ksyms);
}

static int ksym_cmp(const void *p1, const void *p2)
{
return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}

int load_kallsyms_refresh(void)
struct ksyms *load_kallsyms_local(void)
{
FILE *f;
char func[256], buf[256];
char symbol;
void *addr;
int i = 0;

sym_cnt = 0;
int ret;
struct ksyms *ksyms;

f = fopen("/proc/kallsyms", "r");
if (!f)
return -ENOENT;
return NULL;

ksyms = calloc(1, sizeof(struct ksyms));
if (!ksyms) {
fclose(f);
return NULL;
}

while (fgets(buf, sizeof(buf), f)) {
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
if (!addr)
continue;
if (i >= MAX_SYMS)
return -EFBIG;

syms[i].addr = (long) addr;
syms[i].name = strdup(func);
i++;
ret = libbpf_ensure_mem((void **) &ksyms->syms, &ksyms->sym_cap,
sizeof(struct ksym), ksyms->sym_cnt + 1);
if (ret)
goto error;
ret = ksyms__add_symbol(ksyms, func, (unsigned long)addr);
if (ret)
goto error;
}
fclose(f);
sym_cnt = i;
qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
return 0;
qsort(ksyms->syms, ksyms->sym_cnt, sizeof(struct ksym), ksym_cmp);
return ksyms;

error:
fclose(f);
free_kallsyms_local(ksyms);
return NULL;
}

/* Legacy entry point: populate the file-global ksyms cache.
 * Returns 0 on success, 1 on failure.
 */
int load_kallsyms(void)
{
	/*
	 * This is called/used from multiple places,
	 * load symbols just once.
	 */
	if (!ksyms)
		ksyms = load_kallsyms_local();
	return ksyms ? 0 : 1;
}

struct ksym *ksym_search(long key)
struct ksym *ksym_search_local(struct ksyms *ksyms, long key)
{
int start = 0, end = sym_cnt;
int start = 0, end = ksyms->sym_cnt;
int result;

/* kallsyms not loaded. return NULL */
if (sym_cnt <= 0)
if (ksyms->sym_cnt <= 0)
return NULL;

while (start < end) {
size_t mid = start + (end - start) / 2;

result = key - syms[mid].addr;
result = key - ksyms->syms[mid].addr;
if (result < 0)
end = mid;
else if (result > 0)
start = mid + 1;
else
return &syms[mid];
return &ksyms->syms[mid];
}

if (start >= 1 && syms[start - 1].addr < key &&
key < syms[start].addr)
if (start >= 1 && ksyms->syms[start - 1].addr < key &&
key < ksyms->syms[start].addr)
/* valid ksym */
return &syms[start - 1];
return &ksyms->syms[start - 1];

/* out of range. return _stext */
return &syms[0];
return &ksyms->syms[0];
}

long ksym_get_addr(const char *name)
struct ksym *ksym_search(long key)
{
if (!ksyms)
return NULL;
return ksym_search_local(ksyms, key);
}

/* Look up a symbol's address by exact name. Linear scan: the table is
 * sorted by address, not by name. Returns 0 when the name is not found
 * (0 is never a valid symbol address here — zero-address entries are
 * skipped at load time).
 */
long ksym_get_addr_local(struct ksyms *ksyms, const char *name)
{
	size_t i;

	for (i = 0; i < ksyms->sym_cnt; i++) {
		if (strcmp(ksyms->syms[i].name, name) == 0)
			return ksyms->syms[i].addr;
	}

	return 0;
}

/* Legacy wrapper over ksym_get_addr_local() using the file-global
 * cache. Returns 0 when the cache was never loaded or the name is
 * not found.
 */
long ksym_get_addr(const char *name)
{
	return ksyms ? ksym_get_addr_local(ksyms, name) : 0;
}

/* open kallsyms and read symbol addresses on the fly. Without caching all symbols,
* this is faster than load + find.
*/
Expand Down
8 changes: 6 additions & 2 deletions tools/testing/selftests/bpf/trace_helpers.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,17 @@ struct ksym {
long addr;
char *name;
};
/* Opaque, dynamically sized kallsyms table; see trace_helpers.c. */
struct ksyms;

/* Legacy API backed by a file-global, load-once cache. */
int load_kallsyms(void);

struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);

/* Local-snapshot API: each load returns an independent table the caller
 * owns, allowing a re-read of kallsyms after new symbols appear (e.g.
 * after loading bpf_testmod).
 */
struct ksyms *load_kallsyms_local(void);
struct ksym *ksym_search_local(struct ksyms *ksyms, long key);
long ksym_get_addr_local(struct ksyms *ksyms, const char *name);
void free_kallsyms_local(struct ksyms *ksyms);

/* open kallsyms and find addresses on the fly, faster than load + search. */
int kallsyms_find(const char *sym, unsigned long long *addr);

Expand Down

0 comments on commit c698eae

Please sign in to comment.