xdp: Make devmap flush_list common for all map instances
The devmap flush list is used to track entries that need to be flushed
via the xdp_do_flush_map() function. This list used to be
per-map, but there is really no reason for that. Instead make the
flush list global for all devmaps, which simplifies __dev_map_flush()
and dev_map_init_map().

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20191219061006.21980-6-bjorn.topel@gmail.com
Björn Töpel authored and Alexei Starovoitov committed Dec 20, 2019
1 parent e312b9e commit 9636000
Showing 3 changed files with 16 additions and 25 deletions.
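The core of the change: instead of each devmap allocating its own percpu flush list with alloc_percpu(), a single per-CPU list is defined statically, shared by all map instances, and initialized once at boot. A minimal sketch of that pattern follows (illustrative only — my_flush_list, my_init and my_enqueue are made-up names, not the kernel's):

#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>

/* One statically defined flush list per CPU, shared by every map instance. */
static DEFINE_PER_CPU(struct list_head, my_flush_list);

static int __init my_init(void)
{
        int cpu;

        /* Initialize every CPU's list head once at boot. */
        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(my_flush_list, cpu));
        return 0;
}
subsys_initcall(my_init);

/* Producers link an entry's flush node onto the current CPU's list. */
static void my_enqueue(struct list_head *flush_node)
{
        list_add(flush_node, this_cpu_ptr(&my_flush_list));
}

Because the list is found via this_cpu_ptr() rather than through the map, __dev_map_flush() and bq_enqueue() no longer need a map pointer, which is what the diffs below reflect.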
4 changes: 2 additions & 2 deletions include/linux/bpf.h
@@ -959,7 +959,7 @@ struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(struct bpf_map *map);
+void __dev_map_flush(void);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
@@ -1068,7 +1068,7 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
return NULL;
}

-static inline void __dev_map_flush(struct bpf_map *map)
+static inline void __dev_map_flush(void)
{
}

35 changes: 13 additions & 22 deletions kernel/bpf/devmap.c
@@ -75,7 +75,6 @@ struct bpf_dtab_netdev {
struct bpf_dtab {
struct bpf_map map;
struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
-struct list_head __percpu *flush_list;
struct list_head list;

/* these are only used for DEVMAP_HASH type maps */
@@ -85,6 +84,7 @@ struct bpf_dtab {
u32 n_buckets;
};

+static DEFINE_PER_CPU(struct list_head, dev_map_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

@@ -109,8 +109,8 @@ static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
-int err, cpu;
-u64 cost;
+u64 cost = 0;
+int err;

/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -125,9 +125,6 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)

bpf_map_init_from_attr(&dtab->map, attr);

-/* make sure page count doesn't overflow */
-cost = (u64) sizeof(struct list_head) * num_possible_cpus();
-
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

@@ -143,31 +140,22 @@ if (err)
if (err)
return -EINVAL;

-dtab->flush_list = alloc_percpu(struct list_head);
-if (!dtab->flush_list)
-goto free_charge;
-
-for_each_possible_cpu(cpu)
-INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
-
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
if (!dtab->dev_index_head)
-goto free_percpu;
+goto free_charge;

spin_lock_init(&dtab->index_lock);
} else {
dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
sizeof(struct bpf_dtab_netdev *),
dtab->map.numa_node);
if (!dtab->netdev_map)
-goto free_percpu;
+goto free_charge;
}

return 0;

-free_percpu:
-free_percpu(dtab->flush_list);
free_charge:
bpf_map_charge_finish(&dtab->map.memory);
return -ENOMEM;
@@ -254,7 +242,6 @@ static void dev_map_free(struct bpf_map *map)
bpf_map_area_free(dtab->netdev_map);
}

-free_percpu(dtab->flush_list);
kfree(dtab);
}

@@ -384,10 +371,9 @@ static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags)
* net device can be torn down. On devmap tear down we ensure the flush list
* is empty before completing to ensure all flush operations have completed.
*/
-void __dev_map_flush(struct bpf_map *map)
+void __dev_map_flush(void)
{
-struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
+struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
struct xdp_bulk_queue *bq, *tmp;

rcu_read_lock();
@@ -419,7 +405,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
struct net_device *dev_rx)

{
-struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
+struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
@@ -777,10 +763,15 @@ static struct notifier_block dev_map_notifier = {

static int __init dev_map_init(void)
{
+int cpu;
+
/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
offsetof(struct _bpf_dtab_netdev, dev));
register_netdevice_notifier(&dev_map_notifier);
+
+for_each_possible_cpu(cpu)
+INIT_LIST_HEAD(&per_cpu(dev_map_flush_list, cpu));
return 0;
}

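The hunks above show only the lines of __dev_map_flush() and bq_enqueue() that changed; the drain loop itself is unchanged context. For reference, a simplified sketch of that per-CPU flush pattern, reusing the hypothetical my_flush_list from the sketch above, with my_bulk_queue standing in for the real struct xdp_bulk_queue and the transmit step that bq_xmit_all() performs:

/* Hypothetical bulk-queue entry; the real one is struct xdp_bulk_queue. */
struct my_bulk_queue {
        struct list_head flush_node;
        /* queued frames, target netdev, ... */
};

static void my_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&my_flush_list);
        struct my_bulk_queue *bq, *tmp;

        /* Unlink and drain every queue batched on this CPU. */
        list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
                list_del_init(&bq->flush_node);
                /* transmit bq's queued frames here (bq_xmit_all() in the real code) */
        }
}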
2 changes: 1 addition & 1 deletion net/core/filter.c
@@ -3555,7 +3555,7 @@ void xdp_do_flush_map(void)
switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
-__dev_map_flush(map);
+__dev_map_flush();
break;
case BPF_MAP_TYPE_CPUMAP:
__cpu_map_flush(map);