xdp: Make cpumap flush_list common for all map instances
The cpumap flush list is used to track entries that need to be flushed
via the xdp_do_flush_map() function. The list used to be per-map, but
there is really no reason for that. Instead, make the flush list global
for all cpumaps, which simplifies __cpu_map_flush() and cpu_map_alloc().
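
A minimal user-space sketch of the idea follows (hypothetical names such
as bulk_queue, bq_enqueue() and cpu_flush(); not the kernel code): every
map instance enqueues onto one shared per-CPU flush list, modeled here as
an array indexed by CPU id, so the flush path needs no map pointer.

/* Sketch only: a global per-CPU flush list shared by all map instances. */
#include <stdio.h>

#define NR_CPUS 4

struct bulk_queue {
	struct bulk_queue *next;	/* flush-list linkage */
	int id;				/* stand-in for the queued frames */
};

/* One flush list per CPU, shared by every map instance. */
static struct bulk_queue *flush_list[NR_CPUS];

/* Queue a bulk queue on the given CPU's global flush list. */
static void bq_enqueue(int cpu, struct bulk_queue *bq)
{
	bq->next = flush_list[cpu];
	flush_list[cpu] = bq;
}

/* Flush everything queued on this CPU, whichever map queued it. */
static void cpu_flush(int cpu)
{
	struct bulk_queue *bq = flush_list[cpu];

	flush_list[cpu] = NULL;
	for (; bq; bq = bq->next)
		printf("cpu %d: flushing queue %d\n", cpu, bq->id);
}

int main(void)
{
	struct bulk_queue a = { .next = NULL, .id = 1 };
	struct bulk_queue b = { .next = NULL, .id = 2 };

	bq_enqueue(0, &a);	/* queued by map instance A */
	bq_enqueue(0, &b);	/* queued by map instance B */
	cpu_flush(0);		/* one flush covers both maps */
	return 0;
}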

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20191219061006.21980-7-bjorn.topel@gmail.com
Björn Töpel authored and Alexei Starovoitov committed Dec 20, 2019
1 parent 9636000 commit cdfafe9
Showing 3 changed files with 21 additions and 21 deletions.
4 changes: 2 additions & 2 deletions include/linux/bpf.h
@@ -966,7 +966,7 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 			     struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
-void __cpu_map_flush(struct bpf_map *map);
+void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
 
@@ -1097,7 +1097,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 	return NULL;
 }
 
-static inline void __cpu_map_flush(struct bpf_map *map)
+static inline void __cpu_map_flush(void)
 {
 }
 
36 changes: 18 additions & 18 deletions kernel/bpf/cpumap.c
@@ -72,17 +72,18 @@ struct bpf_cpu_map {
 	struct bpf_map map;
 	/* Below members specific for map type */
 	struct bpf_cpu_map_entry **cpu_map;
-	struct list_head __percpu *flush_list;
 };
 
+static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
+
 static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
 
 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_cpu_map *cmap;
 	int err = -ENOMEM;
-	int ret, cpu;
 	u64 cost;
+	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -106,7 +107,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
-	cost += sizeof(struct list_head) * num_possible_cpus();
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
 	ret = bpf_map_charge_init(&cmap->map.memory, cost);
@@ -115,23 +115,14 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 		goto free_cmap;
 	}
 
-	cmap->flush_list = alloc_percpu(struct list_head);
-	if (!cmap->flush_list)
-		goto free_charge;
-
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(per_cpu_ptr(cmap->flush_list, cpu));
-
 	/* Alloc array for possible remote "destination" CPUs */
 	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
 					   sizeof(struct bpf_cpu_map_entry *),
 					   cmap->map.numa_node);
 	if (!cmap->cpu_map)
-		goto free_percpu;
+		goto free_charge;
 
 	return &cmap->map;
-free_percpu:
-	free_percpu(cmap->flush_list);
 free_charge:
 	bpf_map_charge_finish(&cmap->map.memory);
 free_cmap:
@@ -526,7 +517,6 @@ static void cpu_map_free(struct bpf_map *map)
 		/* bq flush and cleanup happens after RCU grace-period */
 		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
 	}
-	free_percpu(cmap->flush_list);
 	bpf_map_area_free(cmap->cpu_map);
 	kfree(cmap);
 }
@@ -618,7 +608,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
  */
 static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 {
-	struct list_head *flush_list = this_cpu_ptr(rcpu->cmap->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
@@ -657,10 +647,9 @@ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
 	return 0;
 }
 
-void __cpu_map_flush(struct bpf_map *map)
+void __cpu_map_flush(void)
 {
-	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
-	struct list_head *flush_list = this_cpu_ptr(cmap->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
 	struct xdp_bulk_queue *bq, *tmp;
 
 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
@@ -670,3 +659,14 @@ void __cpu_map_flush(struct bpf_map *map)
 		wake_up_process(bq->obj->kthread);
 	}
 }
+
+static int __init cpu_map_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
+	return 0;
+}
+
+subsys_initcall(cpu_map_init);
2 changes: 1 addition & 1 deletion net/core/filter.c
@@ -3558,7 +3558,7 @@ void xdp_do_flush_map(void)
 		__dev_map_flush();
 		break;
 	case BPF_MAP_TYPE_CPUMAP:
-		__cpu_map_flush(map);
+		__cpu_map_flush();
 		break;
 	case BPF_MAP_TYPE_XSKMAP:
 		__xsk_map_flush();
