Merge branch 'bpf-allocator'
Alexei Starovoitov says:

====================
Introduce an any-context BPF-specific memory allocator.

Tracing BPF programs can attach to kprobe and fentry hooks, so they run in an
unknown context where calling plain kmalloc() might not be safe. Front-end
kmalloc() with a per-cpu cache of free elements and refill that cache
asynchronously from irq_work, as sketched below.
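
A minimal, hypothetical sketch of that idea (names, watermarks and error
handling are illustrative only and ignore the re-entrancy details the real
bpf_mem_alloc has to handle): each CPU owns a small cache of free elements,
an allocation in any context pops from that cache, and an irq_work callback
refills it with plain kmalloc() from a safe context. While an element sits
on the free list, its memory is reused to hold the llist_node.

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/slab.h>

struct unit_cache {
	struct llist_head free_llist;	/* per-cpu stack of free elements */
	int free_cnt;			/* approximate number of cached elements */
	int unit_size;
	struct irq_work refill_work;
};

static void unit_cache_refill(struct irq_work *work)
{
	struct unit_cache *c = container_of(work, struct unit_cache, refill_work);
	void *obj;

	/* irq_work runs in a context where calling kmalloc() is allowed. */
	while (c->free_cnt < 64) {
		obj = kmalloc(c->unit_size, GFP_NOWAIT | __GFP_NOWARN);
		if (!obj)
			break;
		llist_add(obj, &c->free_llist);	/* element memory doubles as llist_node */
		c->free_cnt++;
	}
}

static void unit_cache_init(struct unit_cache *c, int unit_size)
{
	init_llist_head(&c->free_llist);
	init_irq_work(&c->refill_work, unit_cache_refill);
	c->unit_size = unit_size;
	c->free_cnt = 0;
}

/* Caller owns 'c' (its per-cpu instance, migration disabled). */
static void *unit_cache_alloc(struct unit_cache *c)
{
	struct llist_node *n = llist_del_first(&c->free_llist);

	if (n)
		c->free_cnt--;
	if (c->free_cnt < 32)		/* low watermark: ask for an async refill */
		irq_work_queue(&c->refill_work);
	return n;			/* may be NULL if the cache ran dry */
}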

Major achievements enabled by bpf_mem_alloc:
- Dynamically allocated hash maps used to be 10 times slower than fully
  preallocated ones. With bpf_mem_alloc and subsequent optimizations the speed
  of dynamic maps is equal to that of full prealloc.
- Tracing bpf programs can use dynamically allocated hash maps, potentially
  saving a lot of memory, since a typical hash map is sparsely populated.
- Sleepable bpf programs can use dynamically allocated hash maps.

Future work:
- Expose bpf_mem_alloc as a uapi FD to be used in dynptr_alloc and kptr_alloc
- Convert lru map to bpf_mem_alloc
- Further cleanup htab code. Example: htab_use_raw_lock can be removed.

Changelog:

v5->v6:
- Debugged the reason for selftests/bpf/test_maps OOMing in the small VM that BPF CI uses.
  Added patch 16, which optimizes the use of rcu_barrier()s between bpf_mem_alloc and the
  hash map. It drastically improved the speed of htab destruction.

v4->v5:
- Fixed missing migrate_disable in hash tab free path (Daniel)
- Replaced impossible "memory leak" with WARN_ON_ONCE (Martin)
- Dropped sysctl kernel.bpf_force_dyn_alloc patch (Daniel)
- Added Andrii's ack
- Added new patch 15 that removes kmem_cache usage from bpf_mem_alloc.
  It saves memory and speeds up map create/destroy operations
  while maintaining hash map update/delete performance.

v3->v4:
- fix build issue due to missing local.h on 32-bit arch
- add Kumar's ack
- proposal for next steps from Delyan:
https://lore.kernel.org/bpf/d3f76b27f4e55ec9e400ae8dcaecbb702a4932e8.camel@fb.com/

v2->v3:
- Rewrote the free_list algorithm based on discussions with Kumar. Patch 1.
- Allowed sleepable bpf progs to use dynamically allocated maps. Patches 13 and 14.
- Added sysctl to force bpf_mem_alloc in hash map even if pre-alloc is
  requested to reduce memory consumption. Patch 15.
- Fix: zero-fill percpu allocation
- Single rcu_barrier at the end instead of one per cpu during bpf_mem_alloc destruction

v2 thread:
https://lore.kernel.org/bpf/20220817210419.95560-1-alexei.starovoitov@gmail.com/

v1->v2:
- Moved unsafe direct call_rcu() from hash map into safe place inside bpf_mem_alloc. Patches 7 and 9.
- Optimized atomic_inc/dec in hash map with percpu_counter. Patch 6.
- Tuned watermarks per allocation size. Patch 8.
- Adopted this approach for per-cpu allocation. Patch 10.
- Fully converted hash map to bpf_mem_alloc. Patch 11.
- Removed tracing prog restriction on map types. Combination of all patches and final patch 12.

v1 thread:
https://lore.kernel.org/bpf/20220623003230.37497-1-alexei.starovoitov@gmail.com/

LWN article:
https://lwn.net/Articles/899274/
====================

Link: https://lore.kernel.org/r/
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Daniel Borkmann committed Sep 5, 2022
2 parents 0b20a13 + 9f2c6e9 commit 274052a
Showing 10 changed files with 820 additions and 134 deletions.
28 changes: 28 additions & 0 deletions include/linux/bpf_mem_alloc.h
@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#ifndef _BPF_MEM_ALLOC_H
#define _BPF_MEM_ALLOC_H
#include <linux/compiler_types.h>
#include <linux/workqueue.h>

struct bpf_mem_cache;
struct bpf_mem_caches;

struct bpf_mem_alloc {
struct bpf_mem_caches __percpu *caches;
struct bpf_mem_cache __percpu *cache;
struct work_struct work;
};

int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);

/* kmalloc/kfree equivalent: */
void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);

/* kmem_cache_alloc/free equivalent: */
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);

#endif /* _BPF_MEM_ALLOC_H */
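
For illustration, a minimal hypothetical in-kernel user of the fixed-size
(kmem_cache-style) half of this API could look as follows; the element type
and all names are made up for the example, and the kmalloc-style
bpf_mem_alloc()/bpf_mem_free() pair declared above is not exercised here:

#include <linux/bpf_mem_alloc.h>
#include <linux/preempt.h>

struct my_elem {
	unsigned long payload;
};

static struct bpf_mem_alloc my_ma;

static int my_cache_create(void)
{
	/* Pre-create a cache: every bpf_mem_cache_alloc() below returns
	 * sizeof(struct my_elem) bytes.
	 */
	return bpf_mem_alloc_init(&my_ma, sizeof(struct my_elem), false);
}

static struct my_elem *my_elem_new(void)
{
	struct my_elem *e;

	/* Allocation works in any context, but the caller must keep
	 * migration disabled (bpf programs already run that way; done
	 * explicitly here for a regular kernel context).
	 */
	migrate_disable();
	e = bpf_mem_cache_alloc(&my_ma);
	migrate_enable();
	return e;	/* may be NULL if the per-cpu cache is exhausted */
}

static void my_elem_del(struct my_elem *e)
{
	migrate_disable();
	bpf_mem_cache_free(&my_ma, e);
	migrate_enable();
}

static void my_cache_destroy(void)
{
	/* Frees cached elements and waits for the allocator's RCU work. */
	bpf_mem_alloc_destroy(&my_ma);
}
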
2 changes: 1 addition & 1 deletion kernel/bpf/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o memalloc.o
obj-$(CONFIG_BPF_JIT) += dispatcher.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
138 changes: 100 additions & 38 deletions kernel/bpf/hashtab.c
Expand Up @@ -14,6 +14,7 @@
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
#include <linux/bpf_mem_alloc.h>

#define HTAB_CREATE_FLAG_MASK \
(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
@@ -92,14 +93,21 @@ struct bucket {

struct bpf_htab {
struct bpf_map map;
struct bpf_mem_alloc ma;
struct bpf_mem_alloc pcpu_ma;
struct bucket *buckets;
void *elems;
union {
struct pcpu_freelist freelist;
struct bpf_lru lru;
};
struct htab_elem *__percpu *extra_elems;
atomic_t count; /* number of elements in this hashtable */
/* number of elements in non-preallocated hashtable are kept
* in either pcount or count
*/
struct percpu_counter pcount;
atomic_t count;
bool use_percpu_counter;
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
u32 hashrnd;
@@ -114,14 +122,14 @@ struct htab_elem {
struct {
void *padding;
union {
struct bpf_htab *htab;
struct pcpu_freelist_node fnode;
struct htab_elem *batch_flink;
};
};
};
union {
struct rcu_head rcu;
/* pointer to per-cpu pointer */
void *ptr_to_pptr;
struct bpf_lru_node lru_node;
};
u32 hash;
@@ -441,8 +449,6 @@ static int htab_map_alloc_check(union bpf_attr *attr)
bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
int numa_node = bpf_map_attr_numa_node(attr);

BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
offsetof(struct htab_elem, hash_node.pprev));
BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
offsetof(struct htab_elem, hash_node.pprev));

@@ -563,6 +569,29 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)

htab_init_buckets(htab);

/* compute_batch_value() computes batch value as num_online_cpus() * 2
* and __percpu_counter_compare() needs
* htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
* for percpu_counter to be faster than atomic_t. In practice the average bpf
* hash map size is 10k, which means that a system with 64 cpus will fill
* hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
* define our own batch count as 32 then 10k hash map can be filled up to 80%:
* 10k - 8k > 32 _batch_ * 64 _cpus_
* and __percpu_counter_compare() will still be fast. At that point hash map
* collisions will dominate its performance anyway. Assume that hash map filled
* to 50+% isn't going to be O(1) and use the following formula to choose
* between percpu_counter and atomic_t.
*/
#define PERCPU_COUNTER_BATCH 32
if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
htab->use_percpu_counter = true;

if (htab->use_percpu_counter) {
err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
if (err)
goto free_map_locked;
}

if (prealloc) {
err = prealloc_init(htab);
if (err)
@@ -576,6 +605,16 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (err)
goto free_prealloc;
}
} else {
err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
if (err)
goto free_map_locked;
if (percpu) {
err = bpf_mem_alloc_init(&htab->pcpu_ma,
round_up(htab->map.value_size, 8), true);
if (err)
goto free_map_locked;
}
}

return &htab->map;
@@ -586,6 +625,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
free_percpu(htab->map_locked[i]);
bpf_map_area_free(htab->buckets);
bpf_mem_alloc_destroy(&htab->pcpu_ma);
bpf_mem_alloc_destroy(&htab->ma);
free_htab:
lockdep_unregister_key(&htab->lockdep_key);
bpf_map_area_free(htab);
@@ -860,17 +901,9 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
check_and_free_fields(htab, l);
kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
struct htab_elem *l = container_of(head, struct htab_elem, rcu);
struct bpf_htab *htab = l->htab;

htab_elem_free(htab, l);
bpf_mem_cache_free(&htab->ma, l);
}

static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
@@ -884,6 +917,31 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
}
}

static bool is_map_full(struct bpf_htab *htab)
{
if (htab->use_percpu_counter)
return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
PERCPU_COUNTER_BATCH) >= 0;
return atomic_read(&htab->count) >= htab->map.max_entries;
}

static void inc_elem_count(struct bpf_htab *htab)
{
if (htab->use_percpu_counter)
percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
else
atomic_inc(&htab->count);
}

static void dec_elem_count(struct bpf_htab *htab)
{
if (htab->use_percpu_counter)
percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
else
atomic_dec(&htab->count);
}


static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
htab_put_fd_value(htab, l);
@@ -892,9 +950,8 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
check_and_free_fields(htab, l);
__pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
atomic_dec(&htab->count);
l->htab = htab;
call_rcu(&l->rcu, htab_elem_free_rcu);
dec_elem_count(htab);
htab_elem_free(htab, l);
}
}

@@ -919,13 +976,12 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
void *value, bool onallcpus)
{
/* When using prealloc and not setting the initial value on all cpus,
* zero-fill element values for other cpus (just as what happens when
* not using prealloc). Otherwise, bpf program has no way to ensure
/* When not setting the initial value on all cpus, zero-fill element
* values for other cpus. Otherwise, bpf program has no way to ensure
* known initial values for cpus other than current one
* (onallcpus=false always when coming from bpf prog).
*/
if (htab_is_prealloc(htab) && !onallcpus) {
if (!onallcpus) {
u32 size = round_up(htab->map.value_size, 8);
int current_cpu = raw_smp_processor_id();
int cpu;
@@ -976,19 +1032,16 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new = container_of(l, struct htab_elem, fnode);
}
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries)
if (!old_elem) {
if (is_map_full(htab))
if (!old_elem)
/* when map is full and update() is replacing
* old element, it's ok to allocate, since
* old element will be freed immediately.
* Otherwise return an error
*/
l_new = ERR_PTR(-E2BIG);
goto dec_count;
}
l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
GFP_NOWAIT | __GFP_NOWARN,
htab->map.numa_node);
return ERR_PTR(-E2BIG);
inc_elem_count(htab);
l_new = bpf_mem_cache_alloc(&htab->ma);
if (!l_new) {
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
@@ -999,18 +1052,18 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,

memcpy(l_new->key, key, key_size);
if (percpu) {
size = round_up(size, 8);
if (prealloc) {
pptr = htab_elem_get_ptr(l_new, key_size);
} else {
/* alloc_percpu zero-fills */
pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
GFP_NOWAIT | __GFP_NOWARN);
pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
if (!pptr) {
kfree(l_new);
bpf_mem_cache_free(&htab->ma, l_new);
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
}
l_new->ptr_to_pptr = pptr;
pptr = *(void **)pptr;
}

pcpu_init_value(htab, pptr, value, onallcpus);
@@ -1029,7 +1082,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new->hash = hash;
return l_new;
dec_count:
atomic_dec(&htab->count);
dec_elem_count(htab);
return l_new;
}

@@ -1429,6 +1482,10 @@ static void delete_all_elements(struct bpf_htab *htab)
{
int i;

/* It's called from a worker thread, so disable migration here,
* since bpf_mem_cache_free() relies on that.
*/
migrate_disable();
for (i = 0; i < htab->n_buckets; i++) {
struct hlist_nulls_head *head = select_bucket(htab, i);
struct hlist_nulls_node *n;
@@ -1439,6 +1496,7 @@ static void delete_all_elements(struct bpf_htab *htab)
htab_elem_free(htab, l);
}
}
migrate_enable();
}

static void htab_free_malloced_timers(struct bpf_htab *htab)
@@ -1488,10 +1546,10 @@ static void htab_map_free(struct bpf_map *map)
* There is no need to synchronize_rcu() here to protect map elements.
*/

/* some of free_htab_elem() callbacks for elements of this map may
* not have executed. Wait for them.
/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
* underneath and is responsible for waiting for callbacks to finish
* during bpf_mem_alloc_destroy().
*/
rcu_barrier();
if (!htab_is_prealloc(htab)) {
delete_all_elements(htab);
} else {
@@ -1502,6 +1560,10 @@ static void htab_map_free(struct bpf_map *map)
bpf_map_free_kptr_off_tab(map);
free_percpu(htab->extra_elems);
bpf_map_area_free(htab->buckets);
bpf_mem_alloc_destroy(&htab->pcpu_ma);
bpf_mem_alloc_destroy(&htab->ma);
if (htab->use_percpu_counter)
percpu_counter_destroy(&htab->pcount);
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
free_percpu(htab->map_locked[i]);
lockdep_unregister_key(&htab->lockdep_key);