netdevsim: bpf: support fake map offload
Add to netdevsim the ability to pretend it's offloading BPF maps. We
only allow allocation of tiny 2-entry maps, to keep things simple. A
mutex lock may seem heavy for the operations we perform, but we want
to make sure the callbacks can sleep.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
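
For context, user space requests offload by setting map_ifindex when the map is
created, so netdevsim only sees the callbacks below for maps bound to one of its
ports, and only while the bpf_map_accept debugfs knob (default true) is set. A
minimal, hypothetical userspace sketch; the helper name and interface name are
illustrative only, and the map must still pass the checks in nsim_bpf_map_alloc():

/* Hypothetical sketch (not part of this commit): create a 2-entry array
 * map and bind it to a netdevsim port so the driver "offloads" it.
 */
#include <linux/bpf.h>
#include <net/if.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_offloaded_array(const char *ifname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;		/* array or hash only */
	attr.key_size = sizeof(__u32);
	attr.value_size = sizeof(__u32);
	attr.max_entries = 2;				/* NSIM_BPF_MAX_KEYS */
	attr.map_ifindex = if_nametoindex(ifname);	/* bind to the device */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

Requests with map_flags set, more than two entries, or a type other than
array/hash are rejected by nsim_bpf_map_alloc() below.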
Jakub Kicinski authored and Daniel Borkmann committed Jan 18, 2018
1 parent 064a07c commit 395cacb
Showing 2 changed files with 249 additions and 0 deletions.
246 changes: 246 additions & 0 deletions drivers/net/netdevsim/bpf.c
@@ -17,6 +17,7 @@
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

@@ -31,6 +32,19 @@ struct nsim_bpf_bound_prog {
struct list_head l;
};

#define NSIM_BPF_MAX_KEYS 2

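/* State for one fake "offloaded" map: a tiny key/value store protected by
 * a mutex so that the offload callbacks are allowed to sleep.
 */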
struct nsim_bpf_bound_map {
struct netdevsim *ns;
struct bpf_offloaded_map *map;
struct mutex mutex;
struct nsim_map_entry {
void *key;
void *value;
} entry[NSIM_BPF_MAX_KEYS];
struct list_head l;
};

static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
{
const char **str = file->private;
@@ -284,6 +298,224 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
return 0;
}

static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
return e->key && !memcmp(key, e->key, map->key_size);
}

static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
unsigned int i;

for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
return i;

return -ENOENT;
}

static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;

nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
if (!nmap->entry[idx].key)
return -ENOMEM;
nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
if (!nmap->entry[idx].value) {
kfree(nmap->entry[idx].key);
nmap->entry[idx].key = NULL;
return -ENOMEM;
}

return 0;
}

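/* Return the first allocated key when @key is NULL or unknown, otherwise
 * the allocated key that follows it; -ENOENT once the array is exhausted.
 */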
static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx = -ENOENT;

mutex_lock(&nmap->mutex);

if (key)
idx = nsim_map_key_find(offmap, key);
if (idx == -ENOENT)
idx = 0;
else
idx++;

for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
if (nmap->entry[idx].key) {
memcpy(next_key, nmap->entry[idx].key,
offmap->map.key_size);
break;
}
}

mutex_unlock(&nmap->mutex);

if (idx == ARRAY_SIZE(nmap->entry))
return -ENOENT;
return 0;
}

static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx;

mutex_lock(&nmap->mutex);

idx = nsim_map_key_find(offmap, key);
if (idx >= 0)
memcpy(value, nmap->entry[idx].value, offmap->map.value_size);

mutex_unlock(&nmap->mutex);

return idx < 0 ? idx : 0;
}

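/* Honour the BPF_EXIST/BPF_NOEXIST update flags, then reuse the matching
 * slot or claim a free one before copying in the key and value.
 */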
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx, err = 0;

mutex_lock(&nmap->mutex);

idx = nsim_map_key_find(offmap, key);
if (idx < 0 && flags == BPF_EXIST) {
err = idx;
goto exit_unlock;
}
if (idx >= 0 && flags == BPF_NOEXIST) {
err = -EEXIST;
goto exit_unlock;
}

if (idx < 0) {
for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
if (!nmap->entry[idx].key)
break;
if (idx == ARRAY_SIZE(nmap->entry)) {
err = -E2BIG;
goto exit_unlock;
}

err = nsim_map_alloc_elem(offmap, idx);
if (err)
goto exit_unlock;
}

memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
exit_unlock:
mutex_unlock(&nmap->mutex);

return err;
}

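/* Elements of an offloaded array map can be overwritten but not deleted. */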
static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx;

if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
return -EINVAL;

mutex_lock(&nmap->mutex);

idx = nsim_map_key_find(offmap, key);
if (idx >= 0) {
kfree(nmap->entry[idx].key);
kfree(nmap->entry[idx].value);
memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
}

mutex_unlock(&nmap->mutex);

return idx < 0 ? idx : 0;
}

static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
.map_get_next_key = nsim_map_get_next_key,
.map_lookup_elem = nsim_map_lookup_elem,
.map_update_elem = nsim_map_update_elem,
.map_delete_elem = nsim_map_delete_elem,
};

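/* Accept only array and hash maps with no map_flags and at most
 * NSIM_BPF_MAX_KEYS entries; array maps get all entries pre-allocated.
 */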
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
struct nsim_bpf_bound_map *nmap;
unsigned int i;
int err;

if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
offmap->map.map_type != BPF_MAP_TYPE_HASH))
return -EINVAL;
if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
return -ENOMEM;
if (offmap->map.map_flags)
return -EINVAL;

nmap = kzalloc(sizeof(*nmap), GFP_USER);
if (!nmap)
return -ENOMEM;

offmap->dev_priv = nmap;
nmap->ns = ns;
nmap->map = offmap;
mutex_init(&nmap->mutex);

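	/* Array maps have every slot allocated up front, with the key
	 * holding the element index.
	 */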
if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
u32 *key;

err = nsim_map_alloc_elem(offmap, i);
if (err)
goto err_free;
key = nmap->entry[i].key;
*key = i;
}
}

offmap->dev_ops = &nsim_bpf_map_ops;
list_add_tail(&nmap->l, &ns->bpf_bound_maps);

return 0;

err_free:
	while (i--) {
kfree(nmap->entry[i].key);
kfree(nmap->entry[i].value);
}
kfree(nmap);
return err;
}

static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
unsigned int i;

for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
kfree(nmap->entry[i].key);
kfree(nmap->entry[i].value);
}
list_del_init(&nmap->l);
mutex_destroy(&nmap->mutex);
kfree(nmap);
}

int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -328,6 +560,14 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
return err;

return nsim_xdp_set_prog(ns, bpf);
case BPF_OFFLOAD_MAP_ALLOC:
if (!ns->bpf_map_accept)
return -EOPNOTSUPP;

return nsim_bpf_map_alloc(ns, bpf->offmap);
case BPF_OFFLOAD_MAP_FREE:
nsim_bpf_map_free(bpf->offmap);
return 0;
default:
return -EINVAL;
}
@@ -336,6 +576,7 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
int nsim_bpf_init(struct netdevsim *ns)
{
INIT_LIST_HEAD(&ns->bpf_bound_progs);
INIT_LIST_HEAD(&ns->bpf_bound_maps);

debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
&ns->bpf_offloaded_id);
@@ -362,12 +603,17 @@ int nsim_bpf_init(struct netdevsim *ns)
debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
&ns->bpf_xdpoffload_accept);

ns->bpf_map_accept = true;
debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
&ns->bpf_map_accept);

return 0;
}

void nsim_bpf_uninit(struct netdevsim *ns)
{
WARN_ON(!list_empty(&ns->bpf_bound_progs));
WARN_ON(!list_empty(&ns->bpf_bound_maps));
WARN_ON(ns->xdp_prog);
WARN_ON(ns->bpf_offloaded);
}
3 changes: 3 additions & 0 deletions drivers/net/netdevsim/netdevsim.h
@@ -61,6 +61,9 @@ struct netdevsim {
bool bpf_tc_non_bound_accept;
bool bpf_xdpdrv_accept;
bool bpf_xdpoffload_accept;

bool bpf_map_accept;
struct list_head bpf_bound_maps;
};

extern struct dentry *nsim_ddir;