samples/bpf: Convert XDP samples to libbpf usage
Some of the XDP samples that attach the BPF program to the interface
via libbpf's bpf_set_link_xdp_fd() still use bpf_load.c for loading
and manipulating the eBPF program and maps. Convert them to go through
libbpf instead and remove bpf_load from the picture.

While at it, remove what looks like a debug leftover in
xdp_redirect_map_user.c.

In xdp_redirect_cpu, change the way the program to be loaded onto the
interface is chosen: the user now passes the program's section name
instead of a relative number. In case of a typo, print out the section
names to choose from.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Maciej Fijalkowski authored and Daniel Borkmann committed Feb 1, 2019
1 parent 7313798 commit bbaf602
Showing 6 changed files with 253 additions and 103 deletions.
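
For reference, the core flow the converted samples share looks roughly
like the following minimal sketch of the libbpf pattern. This is not
code from the patch; the object file name, map name, and interface are
assumptions for illustration:

	#include <stdio.h>
	#include <net/if.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	int main(void)
	{
		struct bpf_prog_load_attr prog_load_attr = {
			.prog_type = BPF_PROG_TYPE_XDP,
			.file = "xdp_sample_kern.o", /* assumed object file */
		};
		struct bpf_object *obj;
		int prog_fd, map_fd, ifindex;

		ifindex = if_nametoindex("eth0"); /* assumed interface */
		if (!ifindex)
			return 1;

		/* Load all programs and maps from the ELF object in one call */
		if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
			return 1;

		/* Resolve map fds by name rather than bpf_load's map_fd[] index */
		map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
		if (map_fd < 0)
			return 1;

		/* Attach the first program in the object to the interface */
		if (bpf_set_link_xdp_fd(ifindex, prog_fd, 0) < 0)
			return 1;

		return 0;
	}
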
8 changes: 4 additions & 4 deletions samples/bpf/Makefile
@@ -87,18 +87,18 @@ test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o
 xdp1-objs := xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := xdp1_user.o
-xdp_router_ipv4-objs := bpf_load.o xdp_router_ipv4_user.o
+xdp_router_ipv4-objs := xdp_router_ipv4_user.o
 test_current_task_under_cgroup-objs := bpf_load.o $(CGROUP_HELPERS) \
 				       test_current_task_under_cgroup_user.o
 trace_event-objs := bpf_load.o trace_event_user.o $(TRACE_HELPERS)
 sampleip-objs := bpf_load.o sampleip_user.o $(TRACE_HELPERS)
 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
-xdp_tx_iptunnel-objs := bpf_load.o xdp_tx_iptunnel_user.o
+xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
 test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
-xdp_redirect-objs := bpf_load.o xdp_redirect_user.o
-xdp_redirect_map-objs := bpf_load.o xdp_redirect_map_user.o
+xdp_redirect-objs := xdp_redirect_user.o
+xdp_redirect_map-objs := xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
 xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
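
The xdp_redirect_cpu changes below replace the numeric --prognum index
with a lookup by ELF section name. In isolation, that selection pattern
is roughly the following sketch; the helper name is an illustrative
assumption, and the error path mirrors the patch's behavior of printing
the section names on a typo:

	#include <stdio.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	/* Sketch: pick one XDP program out of a multi-program object by
	 * its ELF section title; the helper name is assumed.
	 */
	static int find_xdp_prog_fd(struct bpf_object *obj, const char *title)
	{
		struct bpf_program *prog, *pos;

		prog = bpf_object__find_program_by_title(obj, title);
		if (!prog) {
			/* On a typo, list the XDP section names to choose from */
			fprintf(stderr, "no program '%s'; available:\n", title);
			bpf_object__for_each_program(pos, obj) {
				if (bpf_program__is_xdp(pos))
					fprintf(stderr, "  %s\n",
						bpf_program__title(pos, false));
			}
			return -1;
		}
		return bpf_program__fd(prog);
	}
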
145 changes: 102 additions & 43 deletions samples/bpf/xdp_redirect_cpu_user.c
@@ -24,12 +24,8 @@ static const char *__doc__ =
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 6
 
-/* Wanted to get rid of bpf_load.h and fake-"libbpf.h" (and instead
- * use bpf/libbpf.h), but cannot as (currently) needed for XDP
- * attaching to a device via bpf_set_link_xdp_fd()
- */
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include "bpf/libbpf.h"
 
 #include "bpf_util.h"
 
@@ -38,6 +34,15 @@ static char ifname_buf[IF_NAMESIZE];
 static char *ifname;
 
 static __u32 xdp_flags;
+static int cpu_map_fd;
+static int rx_cnt_map_fd;
+static int redirect_err_cnt_map_fd;
+static int cpumap_enqueue_cnt_map_fd;
+static int cpumap_kthread_cnt_map_fd;
+static int cpus_available_map_fd;
+static int cpus_count_map_fd;
+static int cpus_iterator_map_fd;
+static int exception_cnt_map_fd;
 
 /* Exit return codes */
 #define EXIT_OK 0
Expand All @@ -52,7 +57,7 @@ static const struct option long_options[] = {
{"dev", required_argument, NULL, 'd' },
{"skb-mode", no_argument, NULL, 'S' },
{"sec", required_argument, NULL, 's' },
{"prognum", required_argument, NULL, 'p' },
{"progname", required_argument, NULL, 'p' },
{"qsize", required_argument, NULL, 'q' },
{"cpu", required_argument, NULL, 'c' },
{"stress-mode", no_argument, NULL, 'x' },
@@ -70,7 +75,17 @@ static void int_exit(int sig)
 	exit(EXIT_OK);
 }
 
-static void usage(char *argv[])
+static void print_avail_progs(struct bpf_object *obj)
+{
+	struct bpf_program *pos;
+
+	bpf_object__for_each_program(pos, obj) {
+		if (bpf_program__is_xdp(pos))
+			printf(" %s\n", bpf_program__title(pos, false));
+	}
+}
+
+static void usage(char *argv[], struct bpf_object *obj)
 {
 	int i;
 
@@ -88,6 +103,8 @@
 			long_options[i].val);
 		printf("\n");
 	}
+	printf("\n Programs to be used for --progname:\n");
+	print_avail_progs(obj);
 	printf("\n");
 }
 
@@ -262,7 +279,7 @@ static __u64 calc_errs_pps(struct datarec *r,
 
 static void stats_print(struct stats_record *stats_rec,
 			struct stats_record *stats_prev,
-			int prog_num)
+			char *prog_name)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
 	double pps = 0, drop = 0, err = 0;
@@ -272,7 +289,7 @@ static void stats_print(struct stats_record *stats_rec,
 	int i;
 
 	/* Header */
-	printf("Running XDP/eBPF prog_num:%d\n", prog_num);
+	printf("Running XDP/eBPF prog_name:%s\n", prog_name);
 	printf("%-15s %-7s %-14s %-11s %-9s\n",
 	       "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");
 
@@ -423,20 +440,20 @@ static void stats_collect(struct stats_record *rec)
 {
 	int fd, i;
 
-	fd = map_fd[1]; /* map: rx_cnt */
+	fd = rx_cnt_map_fd;
 	map_collect_percpu(fd, 0, &rec->rx_cnt);
 
-	fd = map_fd[2]; /* map: redirect_err_cnt */
+	fd = redirect_err_cnt_map_fd;
 	map_collect_percpu(fd, 1, &rec->redir_err);
 
-	fd = map_fd[3]; /* map: cpumap_enqueue_cnt */
+	fd = cpumap_enqueue_cnt_map_fd;
 	for (i = 0; i < MAX_CPUS; i++)
 		map_collect_percpu(fd, i, &rec->enq[i]);
 
-	fd = map_fd[4]; /* map: cpumap_kthread_cnt */
+	fd = cpumap_kthread_cnt_map_fd;
 	map_collect_percpu(fd, 0, &rec->kthread);
 
-	fd = map_fd[8]; /* map: exception_cnt */
+	fd = exception_cnt_map_fd;
 	map_collect_percpu(fd, 0, &rec->exception);
 }
 
@@ -461,7 +478,7 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
 	/* Add a CPU entry to cpumap, as this allocate a cpu entry in
 	 * the kernel for the cpu.
 	 */
-	ret = bpf_map_update_elem(map_fd[0], &cpu, &queue_size, 0);
+	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
 	if (ret) {
 		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
 		exit(EXIT_FAIL_BPF);
@@ -470,23 +487,22 @@
 	/* Inform bpf_prog's that a new CPU is available to select
 	 * from via some control maps.
 	 */
-	/* map_fd[5] = cpus_available */
-	ret = bpf_map_update_elem(map_fd[5], &avail_idx, &cpu, 0);
+	ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
 	if (ret) {
 		fprintf(stderr, "Add to avail CPUs failed\n");
 		exit(EXIT_FAIL_BPF);
 	}
 
 	/* When not replacing/updating existing entry, bump the count */
-	/* map_fd[6] = cpus_count */
-	ret = bpf_map_lookup_elem(map_fd[6], &key, &curr_cpus_count);
+	ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
 	if (ret) {
 		fprintf(stderr, "Failed reading curr cpus_count\n");
 		exit(EXIT_FAIL_BPF);
 	}
 	if (new) {
 		curr_cpus_count++;
-		ret = bpf_map_update_elem(map_fd[6], &key, &curr_cpus_count, 0);
+		ret = bpf_map_update_elem(cpus_count_map_fd, &key,
+					  &curr_cpus_count, 0);
 		if (ret) {
 			fprintf(stderr, "Failed write curr cpus_count\n");
 			exit(EXIT_FAIL_BPF);
@@ -509,8 +525,8 @@ static void mark_cpus_unavailable(void)
 	int ret, i;
 
 	for (i = 0; i < MAX_CPUS; i++) {
-		/* map_fd[5] = cpus_available */
-		ret = bpf_map_update_elem(map_fd[5], &i, &invalid_cpu, 0);
+		ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+					  &invalid_cpu, 0);
 		if (ret) {
 			fprintf(stderr, "Failed marking CPU unavailable\n");
 			exit(EXIT_FAIL_BPF);
@@ -530,7 +546,7 @@ static void stress_cpumap(void)
 	create_cpu_entry(1, 16000, 0, false);
 }
 
-static void stats_poll(int interval, bool use_separators, int prog_num,
+static void stats_poll(int interval, bool use_separators, char *prog_name,
 		       bool stress_mode)
 {
 	struct stats_record *record, *prev;
@@ -546,7 +562,7 @@ static void stats_poll(int interval, bool use_separators, char *prog_name,
 	while (1) {
 		swap(&prev, &record);
 		stats_collect(record);
-		stats_print(record, prev, prog_num);
+		stats_print(record, prev, prog_name);
 		sleep(interval);
 		if (stress_mode)
 			stress_cpumap();
@@ -556,17 +572,51 @@ static void stats_poll(int interval, bool use_separators, char *prog_name,
 	free_stats_record(prev);
 }
 
+static int init_map_fds(struct bpf_object *obj)
+{
+	cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
+	rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
+	redirect_err_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
+	cpumap_enqueue_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
+	cpumap_kthread_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
+	cpus_available_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpus_available");
+	cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
+	cpus_iterator_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
+	exception_cnt_map_fd =
+		bpf_object__find_map_fd_by_name(obj, "exception_cnt");
+
+	if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
+	    redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
+	    cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
+	    cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
+	    exception_cnt_map_fd < 0)
+		return -ENOENT;
+
+	return 0;
+}
+
 int main(int argc, char **argv)
 {
 	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
+	struct bpf_prog_load_attr prog_load_attr = {
+		.prog_type = BPF_PROG_TYPE_UNSPEC,
+	};
 	bool use_separators = true;
 	bool stress_mode = false;
+	struct bpf_program *prog;
+	struct bpf_object *obj;
 	char filename[256];
 	int added_cpus = 0;
 	int longindex = 0;
 	int interval = 2;
-	int prog_num = 5;
 	int add_cpu = -1;
+	int prog_fd;
 	__u32 qsize;
 	int opt;
 
@@ -579,22 +629,25 @@ int main(int argc, char **argv)
 	qsize = 128+64;
 
 	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	prog_load_attr.file = filename;
 
 	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
 		perror("setrlimit(RLIMIT_MEMLOCK)");
 		return 1;
 	}
 
-	if (load_bpf_file(filename)) {
-		fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
+	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
 		return EXIT_FAIL;
-	}
 
-	if (!prog_fd[0]) {
-		fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+	if (prog_fd < 0) {
+		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
+			strerror(errno));
 		return EXIT_FAIL;
 	}
+	if (init_map_fds(obj) < 0) {
+		fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
+		return EXIT_FAIL;
+	}
 
 	mark_cpus_unavailable();
 
 	/* Parse commands line args */
@@ -630,13 +683,7 @@ int main(int argc, char **argv)
 			break;
 		case 'p':
 			/* Selecting eBPF prog to load */
-			prog_num = atoi(optarg);
-			if (prog_num < 0 || prog_num >= MAX_PROG) {
-				fprintf(stderr,
-					"--prognum too large err(%d):%s\n",
-					errno, strerror(errno));
-				goto error;
-			}
+			prog_name = optarg;
 			break;
 		case 'c':
 			/* Add multiple CPUs */
@@ -656,33 +703,45 @@
 		case 'h':
 		error:
 		default:
-			usage(argv);
+			usage(argv, obj);
 			return EXIT_FAIL_OPTION;
 		}
 	}
 	/* Required option */
 	if (ifindex == -1) {
 		fprintf(stderr, "ERR: required option --dev missing\n");
-		usage(argv);
+		usage(argv, obj);
 		return EXIT_FAIL_OPTION;
 	}
 	/* Required option */
 	if (add_cpu == -1) {
 		fprintf(stderr, "ERR: required option --cpu missing\n");
 		fprintf(stderr, " Specify multiple --cpu option to add more\n");
-		usage(argv);
+		usage(argv, obj);
 		return EXIT_FAIL_OPTION;
 	}
 
 	/* Remove XDP program when program is interrupted or killed */
 	signal(SIGINT, int_exit);
 	signal(SIGTERM, int_exit);
 
-	if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
+	prog = bpf_object__find_program_by_title(obj, prog_name);
+	if (!prog) {
+		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
+		return EXIT_FAIL;
+	}
+
+	prog_fd = bpf_program__fd(prog);
+	if (prog_fd < 0) {
+		fprintf(stderr, "bpf_program__fd failed\n");
+		return EXIT_FAIL;
+	}
+
+	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
 		fprintf(stderr, "link set xdp fd failed\n");
 		return EXIT_FAIL_XDP;
 	}
 
-	stats_poll(interval, use_separators, prog_num, stress_mode);
+	stats_poll(interval, use_separators, prog_name, stress_mode);
 	return EXIT_OK;
 }
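
A note on teardown: the samples remove the XDP program from the
interface in their signal handlers, and detaching is simply attaching
an fd of -1. A sketch of that pattern, assuming the sample's ifindex
and xdp_flags globals:

	static void int_exit(int sig)
	{
		/* Passing fd -1 removes the currently attached XDP program */
		if (ifindex > -1)
			bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
		exit(EXIT_OK);
	}
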