bpf: rework memlock-based memory accounting for maps
In order to unify the existing memlock charging code with the
memcg-based memory accounting, which will be added later, let's
rework the current scheme.

Currently the following design is used:
  1) .alloc() callback optionally checks if the allocation will likely
     succeed using bpf_map_precharge_memlock()
  2) .alloc() performs actual allocations
  3) .alloc() callback calculates map cost and sets map.memory.pages
  4) map_create() calls bpf_map_init_memlock() which sets map.memory.user
     and performs actual charging; in case of failure the map is
     destroyed
  <map is in use>
  1) bpf_map_free_deferred() calls bpf_map_release_memlock(), which
     performs uncharge and releases the user
  2) .map_free() callback releases the memory

The scheme can be simplified and made more robust:
  1) .alloc() calculates map cost and calls bpf_map_charge_init()
  2) bpf_map_charge_init() sets map.memory.user and performs actual
     charge
  3) .alloc() performs actual allocations
  <map is in use>
  1) .map_free() callback releases the memory
  2) bpf_map_charge_finish() performs uncharge and releases the user

The new scheme also makes it possible to reuse the
bpf_map_charge_init()/finish() pair for memcg-based accounting.
Because charges are performed before the actual allocations, and
uncharges only after the memory has been freed, no bogus memory
pressure can be created.
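
Condensed, the alloc side of the new scheme looks like this (a sketch
modeled on the arraymap.c hunk below; example_map_alloc and the cost
computation are illustrative placeholders, not part of the patch):

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	struct bpf_array *array;
	u64 array_size, cost;
	int ret;

	array_size = (u64)attr->max_entries * round_up(attr->value_size, 8);
	cost = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	/* 1) charge first: fail before any memory has been allocated */
	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* 2) only then perform the actual allocation */
	array = bpf_map_area_alloc(array_size, bpf_map_attr_numa_node(attr));
	if (!array) {
		/* allocation failed: undo the charge */
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&array->map, attr);
	/* hand the on-stack charge over to the map */
	bpf_map_charge_move(&array->map.memory, &mem);
	return &array->map;
}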

In cases when the map structure is not available (e.g. it's not
created yet, or is already destroyed), an on-stack bpf_map_memory
structure is used. The charge can be transferred with the
bpf_map_charge_move() function.
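
On the free side, the same pair brackets the teardown. A sketch of a
deferred-free path using an on-stack bpf_map_memory (illustrative; the
actual map_create()/bpf_map_free_deferred() changes live in
kernel/bpf/syscall.c, one of the files not expanded below):

static void example_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct bpf_map_memory mem;

	/* move the charge out of the map so it survives ->map_free() */
	bpf_map_charge_move(&mem, &map->memory);
	/* 1) release the memory ... */
	map->ops->map_free(map);
	/* 2) ... then uncharge and release the user */
	bpf_map_charge_finish(&mem);
}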

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Roman Gushchin authored and Alexei Starovoitov committed May 31, 2019
1 parent 3539b96 commit b936ca6
Showing 14 changed files with 112 additions and 88 deletions.
5 changes: 4 additions & 1 deletion include/linux/bpf.h
@@ -650,9 +650,12 @@ struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
-int bpf_map_precharge_memlock(u32 pages);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages);
+void bpf_map_charge_finish(struct bpf_map_memory *mem);
+void bpf_map_charge_move(struct bpf_map_memory *dst,
+			 struct bpf_map_memory *src);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
10 changes: 7 additions & 3 deletions kernel/bpf/arraymap.c
@@ -83,6 +83,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
 	u64 cost, array_size, mask64;
+	struct bpf_map_memory mem;
 	struct bpf_array *array;
 
 	elem_size = round_up(attr->value_size, 8);
@@ -125,23 +126,26 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	}
 	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	ret = bpf_map_precharge_memlock(cost);
+	ret = bpf_map_charge_init(&mem, cost);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
-	if (!array)
+	if (!array) {
+		bpf_map_charge_finish(&mem);
 		return ERR_PTR(-ENOMEM);
+	}
 	array->index_mask = index_mask;
 	array->map.unpriv_array = unpriv;
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
-	array->map.memory.pages = cost;
+	bpf_map_charge_move(&array->map.memory, &mem);
 	array->elem_size = elem_size;
 
 	if (percpu && bpf_array_alloc_percpu(array)) {
+		bpf_map_charge_finish(&array->map.memory);
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
8 changes: 5 additions & 3 deletions kernel/bpf/cpumap.c
@@ -108,10 +108,10 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_cmap;
-	cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	ret = bpf_map_precharge_memlock(cmap->map.memory.pages);
+	ret = bpf_map_charge_init(&cmap->map.memory,
+				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
 	if (ret) {
 		err = ret;
 		goto free_cmap;
@@ -121,7 +121,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	cmap->flush_needed = __alloc_percpu(cpu_map_bitmap_size(attr),
 					    __alignof__(unsigned long));
 	if (!cmap->flush_needed)
-		goto free_cmap;
+		goto free_charge;
 
 	/* Alloc array for possible remote "destination" CPUs */
 	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
@@ -133,6 +133,8 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	return &cmap->map;
 free_percpu:
 	free_percpu(cmap->flush_needed);
+free_charge:
+	bpf_map_charge_finish(&cmap->map.memory);
 free_cmap:
 	kfree(cmap);
 	return ERR_PTR(err);
13 changes: 7 additions & 6 deletions kernel/bpf/devmap.c
@@ -111,10 +111,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_dtab;
 
-	dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
-	/* if map size is larger than memlock limit, reject it early */
-	err = bpf_map_precharge_memlock(dtab->map.memory.pages);
+	/* if map size is larger than memlock limit, reject it */
+	err = bpf_map_charge_init(&dtab->map.memory,
+				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
 	if (err)
 		goto free_dtab;
 
@@ -125,19 +124,21 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 					    __alignof__(unsigned long),
 					    GFP_KERNEL | __GFP_NOWARN);
 	if (!dtab->flush_needed)
-		goto free_dtab;
+		goto free_charge;
 
 	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
 					      sizeof(struct bpf_dtab_netdev *),
 					      dtab->map.numa_node);
 	if (!dtab->netdev_map)
-		goto free_dtab;
+		goto free_charge;
 
 	spin_lock(&dev_map_lock);
 	list_add_tail_rcu(&dtab->list, &dev_map_list);
 	spin_unlock(&dev_map_lock);
 
 	return &dtab->map;
+free_charge:
+	bpf_map_charge_finish(&dtab->map.memory);
 free_dtab:
 	free_percpu(dtab->flush_needed);
 	kfree(dtab);
11 changes: 6 additions & 5 deletions kernel/bpf/hashtab.c
@@ -364,10 +364,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		/* make sure page count doesn't overflow */
 		goto free_htab;
 
-	htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
-	/* if map size is larger than memlock limit, reject it early */
-	err = bpf_map_precharge_memlock(htab->map.memory.pages);
+	/* if map size is larger than memlock limit, reject it */
+	err = bpf_map_charge_init(&htab->map.memory,
+				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
 	if (err)
 		goto free_htab;
 
@@ -376,7 +375,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 					   sizeof(struct bucket),
 					   htab->map.numa_node);
 	if (!htab->buckets)
-		goto free_htab;
+		goto free_charge;
 
 	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 		htab->hashrnd = 0;
@@ -409,6 +408,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	prealloc_destroy(htab);
 free_buckets:
 	bpf_map_area_free(htab->buckets);
+free_charge:
+	bpf_map_charge_finish(&htab->map.memory);
 free_htab:
 	kfree(htab);
 	return ERR_PTR(err);
9 changes: 6 additions & 3 deletions kernel/bpf/local_storage.c
@@ -272,6 +272,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 {
 	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_cgroup_storage_map *map;
+	struct bpf_map_memory mem;
 	u32 pages;
 	int ret;
 
@@ -294,16 +295,18 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 
 	pages = round_up(sizeof(struct bpf_cgroup_storage_map), PAGE_SIZE) >>
 		PAGE_SHIFT;
-	ret = bpf_map_precharge_memlock(pages);
+	ret = bpf_map_charge_init(&mem, pages);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
 	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
 			   __GFP_ZERO | GFP_USER, numa_node);
-	if (!map)
+	if (!map) {
+		bpf_map_charge_finish(&mem);
 		return ERR_PTR(-ENOMEM);
+	}
 
-	map->map.memory.pages = pages;
+	bpf_map_charge_move(&map->map.memory, &mem);
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&map->map, attr);
5 changes: 2 additions & 3 deletions kernel/bpf/lpm_trie.c
@@ -578,9 +578,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 		goto out_err;
 	}
 
-	trie->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
-	ret = bpf_map_precharge_memlock(trie->map.memory.pages);
+	ret = bpf_map_charge_init(&trie->map.memory,
+				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
 	if (ret)
 		goto out_err;
 
9 changes: 6 additions & 3 deletions kernel/bpf/queue_stack_maps.c
@@ -67,6 +67,7 @@ static int queue_stack_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 {
 	int ret, numa_node = bpf_map_attr_numa_node(attr);
+	struct bpf_map_memory mem = {0};
 	struct bpf_queue_stack *qs;
 	u64 size, queue_size, cost;
 
@@ -77,19 +78,21 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	ret = bpf_map_precharge_memlock(cost);
+	ret = bpf_map_charge_init(&mem, cost);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
 	qs = bpf_map_area_alloc(queue_size, numa_node);
-	if (!qs)
+	if (!qs) {
+		bpf_map_charge_finish(&mem);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	memset(qs, 0, sizeof(*qs));
 
 	bpf_map_init_from_attr(&qs->map, attr);
 
-	qs->map.memory.pages = cost;
+	bpf_map_charge_move(&qs->map.memory, &mem);
 	qs->size = size;
 
 	raw_spin_lock_init(&qs->lock);
9 changes: 6 additions & 3 deletions kernel/bpf/reuseport_array.c
@@ -151,6 +151,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 {
 	int err, numa_node = bpf_map_attr_numa_node(attr);
 	struct reuseport_array *array;
+	struct bpf_map_memory mem;
 	u64 cost, array_size;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -165,18 +166,20 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	err = bpf_map_precharge_memlock(cost);
+	err = bpf_map_charge_init(&mem, cost);
 	if (err)
 		return ERR_PTR(err);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
-	if (!array)
+	if (!array) {
+		bpf_map_charge_finish(&mem);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
-	array->map.memory.pages = cost;
+	bpf_map_charge_move(&array->map.memory, &mem);
 
 	return &array->map;
 }
30 changes: 17 additions & 13 deletions kernel/bpf/stackmap.c
@@ -89,6 +89,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 {
 	u32 value_size = attr->value_size;
 	struct bpf_stack_map *smap;
+	struct bpf_map_memory mem;
 	u64 cost, n_buckets;
 	int err;
 
@@ -116,40 +117,43 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	n_buckets = roundup_pow_of_two(attr->max_entries);
 
 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
 	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-E2BIG);
+	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
+	if (cost >= U32_MAX - PAGE_SIZE)
+		return ERR_PTR(-E2BIG);
+
+	err = bpf_map_charge_init(&mem,
+				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	if (err)
+		return ERR_PTR(err);
 
 	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
-	if (!smap)
+	if (!smap) {
+		bpf_map_charge_finish(&mem);
 		return ERR_PTR(-ENOMEM);
-
-	err = -E2BIG;
-	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
-	if (cost >= U32_MAX - PAGE_SIZE)
-		goto free_smap;
+	}
 
 	bpf_map_init_from_attr(&smap->map, attr);
 	smap->map.value_size = value_size;
 	smap->n_buckets = n_buckets;
-	smap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
-	err = bpf_map_precharge_memlock(smap->map.memory.pages);
-	if (err)
-		goto free_smap;
 
 	err = get_callchain_buffers(sysctl_perf_event_max_stack);
 	if (err)
-		goto free_smap;
+		goto free_charge;
 
 	err = prealloc_elems_and_freelist(smap);
 	if (err)
		goto put_buffers;
 
+	bpf_map_charge_move(&smap->map.memory, &mem);
+
 	return &smap->map;
 
 put_buffers:
 	put_callchain_buffers();
-free_smap:
+free_charge:
+	bpf_map_charge_finish(&mem);
 	bpf_map_area_free(smap);
 	return ERR_PTR(err);
 }
[diffs for the remaining four changed files were not loaded]
