Merge branch 'net-few-debug-refinements'
Eric Dumazet says:

====================
net: few debug refinements

Adopt DEBUG_NET_WARN_ON_ONCE() or WARN_ON_ONCE()
in some places where it makes sense.

Add checks in napi_consume_skb() and __napi_alloc_skb()

Make sure napi_get_frags() does not use page fragments
for skb->head.
====================

Link: https://lore.kernel.org/r/20220608160438.1342569-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on Jun 10, 2022. Merge commit 3000024 with parents f5f37fc and fd9ea57.
Showing 6 changed files with 34 additions and 15 deletions.
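For context: DEBUG_NET_WARN_ON_ONCE() is the networking debug assertion that only emits a runtime warning when the kernel is built with CONFIG_DEBUG_NET; in normal builds it compiles down to a type check and no code, which is why it can replace WARN_ON() on hot paths. A minimal sketch of the idea, assuming the usual conditional definition (not the verbatim header):

/* Sketch only: with CONFIG_DEBUG_NET=y this behaves like WARN_ON_ONCE(),
 * otherwise the condition is merely type-checked and generates no code.
 */
#if defined(CONFIG_DEBUG_NET)
#define DEBUG_NET_WARN_ON_ONCE(cond) ((void)WARN_ON_ONCE(cond))
#else
#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#endif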
net/core/dev.c: 19 additions, 1 deletion

@@ -3925,7 +3925,7 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 	skb->pkt_type = PACKET_LOOPBACK;
 	if (skb->ip_summed == CHECKSUM_NONE)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	WARN_ON(!skb_dst(skb));
+	DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
 	skb_dst_force(skb);
 	netif_rx(skb);
 	return 0;
@@ -6351,6 +6351,23 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
 }
 EXPORT_SYMBOL(dev_set_threaded);

+/* Double check that napi_get_frags() allocates skbs with
+ * skb->head being backed by slab, not a page fragment.
+ * This is to make sure bug fixed in 3226b158e67c
+ * ("net: avoid 32 x truesize under-estimation for tiny skbs")
+ * does not accidentally come back.
+ */
+static void napi_get_frags_check(struct napi_struct *napi)
+{
+	struct sk_buff *skb;
+
+	local_bh_disable();
+	skb = napi_get_frags(napi);
+	WARN_ON_ONCE(skb && skb->head_frag);
+	napi_free_frags(napi);
+	local_bh_enable();
+}
+
 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 			   int (*poll)(struct napi_struct *, int), int weight)
 {
@@ -6378,6 +6395,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 	set_bit(NAPI_STATE_NPSVC, &napi->state);
 	list_add_rcu(&napi->dev_list, &dev->napi_list);
 	napi_hash_add(napi);
+	napi_get_frags_check(napi);
 	/* Create kthread for this napi if dev->threaded is set.
 	 * Clear dev->threaded if kthread creation failed so that
 	 * threaded mode will not be enabled in napi_enable().
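The new napi_get_frags_check() self-test above re-asserts the guarantee from 3226b158e67c: the skb returned by napi_get_frags() must have a slab-backed head (skb->head_frag == 0), so a tiny GRO skb cannot pin an entire page fragment while reporting a much smaller truesize. The rule it protects, written here as a hypothetical standalone helper rather than the real logic inside __napi_alloc_skb() (the 1024-byte threshold is illustrative):

/* Hypothetical sketch: small head requests should come from slab so
 * skb->head_frag stays 0 and truesize matches the real allocation;
 * only larger requests may use a page-fragment-backed head.
 */
static struct sk_buff *alloc_head_sketch(struct napi_struct *napi,
					 unsigned int len)
{
	if (len <= SKB_WITH_OVERHEAD(1024))
		return __alloc_skb(len, GFP_ATOMIC, SKB_ALLOC_RX, NUMA_NO_NODE);

	/* May return a page-fragment-backed head (skb->head_frag == 1). */
	return napi_alloc_skb(napi, len);
}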
net/core/skbuff.c: 3 additions, 2 deletions

@@ -560,6 +560,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	struct sk_buff *skb;
 	void *data;

+	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
 	len += NET_SKB_PAD + NET_IP_ALIGN;

 	/* If requested length is either too small or too big,
@@ -728,7 +729,7 @@ void skb_release_head_state(struct sk_buff *skb)
 {
 	skb_dst_drop(skb);
 	if (skb->destructor) {
-		WARN_ON(in_hardirq());
+		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
 		skb->destructor(skb);
 	}
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
@@ -981,7 +982,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
 		return;
 	}

-	lockdep_assert_in_softirq();
+	DEBUG_NET_WARN_ON_ONCE(!in_softirq());

 	if (!skb_unref(skb))
 		return;
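Both skbuff.c assertions above document a calling-context requirement: __napi_alloc_skb() and napi_consume_skb() are meant to run in softirq (NAPI) context. A hypothetical TX-completion fragment showing that context; example_pull_completed_tx() is invented for illustration:

/* Hypothetical NAPI poll routine: skbs are freed in softirq context,
 * so DEBUG_NET_WARN_ON_ONCE(!in_softirq()) stays silent. Passing the
 * budget lets napi_consume_skb() batch frees into the per-CPU cache;
 * budget == 0 (netpoll) falls back to dev_consume_skb_any().
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;

	while ((skb = example_pull_completed_tx(napi)) != NULL)
		napi_consume_skb(skb, budget);

	napi_complete_done(napi, 0);
	return 0;
}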
net/core/sock.c: 1 addition, 1 deletion

@@ -2844,7 +2844,7 @@ void __release_sock(struct sock *sk)
 	do {
 		next = skb->next;
 		prefetch(next);
-		WARN_ON_ONCE(skb_dst_is_noref(skb));
+		DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb));
 		skb_mark_not_on_list(skb);
 		sk_backlog_rcv(sk, skb);
net/core/stream.c: 3 additions, 3 deletions

@@ -196,13 +196,13 @@ void sk_stream_kill_queues(struct sock *sk)
 	__skb_queue_purge(&sk->sk_receive_queue);

 	/* Next, the write queue. */
-	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
+	WARN_ON_ONCE(!skb_queue_empty(&sk->sk_write_queue));

 	/* Account for returned memory. */
 	sk_mem_reclaim_final(sk);

-	WARN_ON(sk->sk_wmem_queued);
-	WARN_ON(sk->sk_forward_alloc);
+	WARN_ON_ONCE(sk->sk_wmem_queued);
+	WARN_ON_ONCE(sk->sk_forward_alloc);

 	/* It is _impossible_ for the backlog to contain anything
 	 * when we get here. All user references to this socket
net/ipv4/af_inet.c: 4 additions, 4 deletions

@@ -148,10 +148,10 @@ void inet_sock_destruct(struct sock *sk)
 		return;
 	}

-	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
-	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
-	WARN_ON(sk->sk_wmem_queued);
-	WARN_ON(sk_forward_alloc_get(sk));
+	WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
+	WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
+	WARN_ON_ONCE(sk->sk_wmem_queued);
+	WARN_ON_ONCE(sk_forward_alloc_get(sk));

 	kfree(rcu_dereference_protected(inet->inet_opt, 1));
 	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
net/unix/af_unix.c: 4 additions, 4 deletions

@@ -302,7 +302,7 @@ static void __unix_remove_socket(struct sock *sk)

 static void __unix_insert_socket(struct sock *sk)
 {
-	WARN_ON(!sk_unhashed(sk));
+	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
 	sk_add_node(sk, &unix_socket_table[sk->sk_hash]);
 }
@@ -554,9 +554,9 @@ static void unix_sock_destructor(struct sock *sk)
 		u->oob_skb = NULL;
 	}
 #endif
-	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
-	WARN_ON(!sk_unhashed(sk));
-	WARN_ON(sk->sk_socket);
+	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
+	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
+	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		pr_info("Attempt to release alive unix socket: %p\n", sk);
 		return;
