xsk: Make xskmap flush_list common for all map instances
The xskmap flush list is used to track entries that need to be
flushed via the xdp_do_flush_map() function. This list used to be
per-map, but there is really no reason for that. Instead, make the
flush list global for all xskmaps, which simplifies __xsk_map_flush()
and xsk_map_alloc().

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20191219061006.21980-5-bjorn.topel@gmail.com
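
[Editor's note: the core of the change is replacing a per-map, dynamically
allocated per-CPU list with a single statically defined per-CPU list shared
by every xskmap. Below is a minimal userspace C sketch of that pattern, not
kernel code: fake_xsk, NR_CPUS, redirect() and flush() are hypothetical
stand-ins for the kernel's DEFINE_PER_CPU(struct list_head,
xskmap_flush_list), __xsk_map_redirect() and __xsk_map_flush().]

#include <stdio.h>

#define NR_CPUS 4

struct fake_xsk {
	int id;
	struct fake_xsk *flush_next;	/* link in the per-CPU flush list */
	int queued;
};

/* One flush list per CPU, shared by all "maps" (cf. xskmap_flush_list). */
static struct fake_xsk *flush_list[NR_CPUS];

/* cf. __xsk_map_redirect(): queue the socket for a later flush, at most once. */
static void redirect(int cpu, struct fake_xsk *xs)
{
	if (!xs->queued) {
		xs->flush_next = flush_list[cpu];
		flush_list[cpu] = xs;
		xs->queued = 1;
	}
}

/* cf. __xsk_map_flush(): drain this CPU's list, notifying each socket once. */
static void flush(int cpu)
{
	struct fake_xsk *xs = flush_list[cpu];

	while (xs) {
		struct fake_xsk *next = xs->flush_next;

		printf("cpu%d: flushing xsk %d\n", cpu, xs->id);
		xs->queued = 0;
		xs = next;
	}
	flush_list[cpu] = NULL;
}

int main(void)
{
	struct fake_xsk a = { .id = 1 }, b = { .id = 2 };

	/* Sockets from different maps land on the same per-CPU list. */
	redirect(0, &a);
	redirect(0, &b);
	redirect(0, &a);	/* already queued, so not added twice */
	flush(0);
	return 0;
}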
Björn Töpel authored and Alexei Starovoitov committed Dec 20, 2019
1 parent fb5aacd commit e312b9e
Showing 4 changed files with 20 additions and 35 deletions.
11 changes: 4 additions & 7 deletions include/net/xdp_sock.h

@@ -72,7 +72,6 @@ struct xdp_umem {
 
 struct xsk_map {
 	struct bpf_map map;
-	struct list_head __percpu *flush_list;
 	spinlock_t lock; /* Synchronize map updates */
 	struct xdp_sock *xsk_map[];
 };
@@ -139,9 +138,8 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
 			     struct xdp_sock **map_entry);
 int xsk_map_inc(struct xsk_map *map);
 void xsk_map_put(struct xsk_map *map);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
 
 static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
 						     u32 key)
@@ -369,13 +367,12 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
 	return 0;
 }
 
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-				     struct xdp_sock *xs)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline void __xsk_map_flush(struct bpf_map *map)
+static inline void __xsk_map_flush(void)
 {
 }
 
18 changes: 3 additions & 15 deletions kernel/bpf/xskmap.c

@@ -72,9 +72,9 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_map_memory mem;
-	int cpu, err, numa_node;
+	int err, numa_node;
 	struct xsk_map *m;
-	u64 cost, size;
+	u64 size;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -86,9 +86,8 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 
 	numa_node = bpf_map_attr_numa_node(attr);
 	size = struct_size(m, xsk_map, attr->max_entries);
-	cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
 
-	err = bpf_map_charge_init(&mem, cost);
+	err = bpf_map_charge_init(&mem, size);
 	if (err < 0)
 		return ERR_PTR(err);
 
@@ -102,16 +101,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	bpf_map_charge_move(&m->map.memory, &mem);
 	spin_lock_init(&m->lock);
 
-	m->flush_list = alloc_percpu(struct list_head);
-	if (!m->flush_list) {
-		bpf_map_charge_finish(&m->map.memory);
-		bpf_map_area_free(m);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
-
 	return &m->map;
 }
 
@@ -121,7 +110,6 @@ static void xsk_map_free(struct bpf_map *map)
 
 	bpf_clear_redirect_map(map);
 	synchronize_net();
-	free_percpu(m->flush_list);
 	bpf_map_area_free(m);
 }
 
9 changes: 4 additions & 5 deletions net/core/filter.c

@@ -3511,8 +3511,7 @@ xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
 
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 			    struct bpf_map *map,
-			    struct xdp_buff *xdp,
-			    u32 index)
+			    struct xdp_buff *xdp)
 {
 	int err;
 
@@ -3537,7 +3536,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 	case BPF_MAP_TYPE_XSKMAP: {
 		struct xdp_sock *xs = fwd;
 
-		err = __xsk_map_redirect(map, xdp, xs);
+		err = __xsk_map_redirect(xs, xdp);
 		return err;
 	}
 	default:
@@ -3562,7 +3561,7 @@ void xdp_do_flush_map(void)
 		__cpu_map_flush(map);
 		break;
 	case BPF_MAP_TYPE_XSKMAP:
-		__xsk_map_flush(map);
+		__xsk_map_flush();
 		break;
 	default:
 		break;
@@ -3619,7 +3618,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
 		xdp_do_flush_map();
 
-	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
+	err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
 	if (unlikely(err))
 		goto err;
 
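[Editor's note: for context on where these calls sit, here is a hypothetical
driver NAPI poll loop, not part of this commit: xdp_do_redirect() may queue
an xsk socket on the per-CPU flush list, and a single xdp_do_flush_map() at
the end of the poll drains it. rx_ring_has_packets(), fill_xdp_buff(),
run_xdp_prog(), netdev and xdp_prog are made-up placeholders for
driver-specific state and logic.]

/* Hypothetical NAPI poll loop; only xdp_do_redirect() and
 * xdp_do_flush_map() are real kernel APIs here.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget && rx_ring_has_packets()) {
		struct xdp_buff xdp;

		fill_xdp_buff(&xdp);			/* driver-specific */
		if (run_xdp_prog(&xdp) == XDP_REDIRECT)
			xdp_do_redirect(netdev, &xdp, xdp_prog);
		done++;
	}

	/* One flush per poll drains every per-CPU flush list. */
	xdp_do_flush_map();
	return done;
}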
17 changes: 9 additions & 8 deletions net/xdp/xsk.c

@@ -31,6 +31,8 @@
 
 #define TX_BATCH_SIZE 16
 
+static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
 	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
@@ -264,11 +266,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	return err;
 }
 
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs)
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
 	int err;
 
 	err = xsk_rcv(xs, xdp);
@@ -281,10 +281,9 @@ int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
 	return 0;
 }
 
-void __xsk_map_flush(struct bpf_map *map)
+void __xsk_map_flush(void)
 {
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
 	struct xdp_sock *xs, *tmp;
 
 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -1177,7 +1176,7 @@ static struct pernet_operations xsk_net_ops = {
 
 static int __init xsk_init(void)
 {
-	int err;
+	int err, cpu;
 
 	err = proto_register(&xsk_proto, 0 /* no slab */);
 	if (err)
@@ -1195,6 +1194,8 @@ static int __init xsk_init(void)
 	if (err)
 		goto out_pernet;
 
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
 	return 0;
 
 out_pernet:
