Skip to content

Commit

Permalink
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Browse files Browse the repository at this point in the history
Daniel Borkmann says:

====================
pull-request: bpf 2018-08-24

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix BPF sockmap and tls where we get a hang in do_tcp_sendpages()
   when sndbuf is full due to missing calls into underlying socket's
   sk_write_space(), from John.

2) Two BPF sockmap fixes to reject invalid parameters on map creation
   and to fix a map element miscount on allocation failure. Another fix
   for BPF hash tables to use per hash table salt for jhash(), from Daniel.

3) Fix for bpftool's command line parsing in order to terminate on bad
   arguments instead of continuing to loop in some corner cases, from Quentin.

4) Fix error value of xdp_umem_assign_dev() in order to comply with
   expected bind ops error codes, from Prashant.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
David S. Miller committed Aug 24, 2018
2 parents c08eeba + 785e76d commit ff0fadf
Show file tree
Hide file tree
Showing 5 changed files with 35 additions and 17 deletions.
23 changes: 13 additions & 10 deletions kernel/bpf/hashtab.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
Expand All @@ -41,6 +42,7 @@ struct bpf_htab {
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
u32 hashrnd;
};

/* each htab element is struct htab_elem + key + value */
Expand Down Expand Up @@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (!htab->buckets)
goto free_htab;

htab->hashrnd = get_random_int();
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
Expand Down Expand Up @@ -402,9 +405,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
return ERR_PTR(err);
}

/* Hash @key_len bytes of @key into a 32-bit value used for bucket selection.
 * @hashrnd is the per-hashtable random salt (htab->hashrnd, drawn from
 * get_random_int() at map creation) so that hash bucket distribution differs
 * between tables and cannot be predicted across map instances.
 */
static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
	return jhash(key, key_len, hashrnd);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
Expand Down Expand Up @@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)

key_size = map->key_size;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);

head = select_bucket(htab, hash);

Expand Down Expand Up @@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
if (!key)
goto find_first_elem;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);

head = select_bucket(htab, hash);

Expand Down Expand Up @@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,

key_size = map->key_size;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);

b = __select_bucket(htab, hash);
head = &b->head;
Expand Down Expand Up @@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,

key_size = map->key_size;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);

b = __select_bucket(htab, hash);
head = &b->head;
Expand Down Expand Up @@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,

key_size = map->key_size;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);

b = __select_bucket(htab, hash);
head = &b->head;
Expand Down Expand Up @@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,

key_size = map->key_size;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);

b = __select_bucket(htab, hash);
head = &b->head;
Expand Down Expand Up @@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)

key_size = map->key_size;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;

Expand Down Expand Up @@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)

key_size = map->key_size;

hash = htab_map_hash(key, key_size);
hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;

Expand Down
11 changes: 9 additions & 2 deletions kernel/bpf/sockmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -1427,12 +1427,15 @@ static void smap_tx_work(struct work_struct *w)
static void smap_write_space(struct sock *sk)
{
struct smap_psock *psock;
void (*write_space)(struct sock *sk);

rcu_read_lock();
psock = smap_psock_sk(sk);
if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
schedule_work(&psock->tx_work);
write_space = psock->save_write_space;
rcu_read_unlock();
write_space(sk);
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
Expand Down Expand Up @@ -2140,7 +2143,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
return ERR_PTR(-EPERM);

/* check sanity of attributes */
if (attr->max_entries == 0 || attr->value_size != 4 ||
if (attr->max_entries == 0 ||
attr->key_size == 0 ||
attr->value_size != 4 ||
attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);

Expand Down Expand Up @@ -2267,8 +2272,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
}
l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
htab->map.numa_node);
if (!l_new)
if (!l_new) {
atomic_dec(&htab->count);
return ERR_PTR(-ENOMEM);
}

memcpy(l_new->key, key, key_size);
l_new->sk = sk;
Expand Down
9 changes: 7 additions & 2 deletions net/tls/tls_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);

/* We are already sending pages, ignore notification */
if (ctx->in_tcp_sendpages)
/* If in_tcp_sendpages, call the lower protocol's write space handler
 * to ensure we wake up any waiting operations there. For example,
 * if do_tcp_sendpages were to call sk_wait_event.
 */
if (ctx->in_tcp_sendpages) {
ctx->sk_write_space(sk);
return;
}

if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
gfp_t sk_allocation = sk->sk_allocation;
Expand Down
4 changes: 2 additions & 2 deletions net/xdp/xdp_umem.c
Original file line number Diff line number Diff line change
Expand Up @@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
return 0;

if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

bpf.command = XDP_QUERY_XSK_UMEM;

rtnl_lock();
err = xdp_umem_query(dev, queue_id);
if (err) {
err = err < 0 ? -ENOTSUPP : -EBUSY;
err = err < 0 ? -EOPNOTSUPP : -EBUSY;
goto err_rtnl_unlock;
}

Expand Down
5 changes: 4 additions & 1 deletion tools/bpf/bpftool/map_perf_ring.c
Original file line number Diff line number Diff line change
Expand Up @@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
}

while (argc) {
if (argc < 2)
if (argc < 2) {
BAD_ARG();
goto err_close_map;
}

if (is_prefix(*argv, "cpu")) {
char *endptr;
Expand All @@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
NEXT_ARG();
} else {
BAD_ARG();
goto err_close_map;
}

do_all = false;
Expand Down

0 comments on commit ff0fadf

Please sign in to comment.