Revert "udp_tunnel: GRO optimizations"
Revert "udp_tunnel: use static call for GRO hooks when possible"
This reverts commit 311b365.

Revert "udp_tunnel: create a fastpath GRO lookup."
This reverts commit 8d4880d.

There are multiple small issues with the series. In the interest
of unblocking the merge window, let's opt for a revert.

Link: https://lore.kernel.org/cover.1742557254.git.pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed Mar 25, 2025
1 parent aa3651c commit 1f61542
Showing 9 changed files with 2 additions and 249 deletions.
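
For context, the two reverted commits built a GRO fast path for UDP tunnels: one cached, per netns and per address family, a single unconnected, wildcard-bound tunnel socket and tried it before the full socket lookup (see the udp_tunnel_sk() and udp4_gro_lookup_skb()/udp6_gro_lookup_skb() hunks below); the other dispatched the tunnel gro_receive hook through a static call while only one tunnel type was registered (see the net/ipv4/udp_offload.c hunk). The sketch below is a minimal userspace model of the cached-socket idea only; it is not kernel code, and every name in it is illustrative.

/* Illustrative userspace model of the reverted "fastpath GRO lookup":
 * keep at most one cached wildcard tunnel socket per address family and
 * try it before the regular (expensive) lookup.  Names are made up.
 */
#include <stdint.h>
#include <stdio.h>

struct model_sock {
	uint16_t local_port;		/* bound UDP port, host order */
};

/* [0] = IPv4, [1] = IPv6; set only while exactly one wildcard tunnel
 * socket exists for that family, otherwise NULL (mirrors udp_tunnel_sk()).
 */
static struct model_sock *cached_tunnel_sk[2];

/* Stand-in for the full __udp4_lib_lookup()/__udp6_lib_lookup() walk. */
static struct model_sock *full_lookup(int is_ipv6, uint16_t dport)
{
	(void)is_ipv6;
	(void)dport;
	return NULL;			/* pretend nothing matched */
}

static struct model_sock *gro_lookup(int is_ipv6, uint16_t dport)
{
	struct model_sock *sk = cached_tunnel_sk[is_ipv6];

	/* Fast path: the single cached tunnel socket owns this port. */
	if (sk && sk->local_port == dport)
		return sk;

	/* Slow path: regular socket lookup. */
	return full_lookup(is_ipv6, dport);
}

int main(void)
{
	struct model_sock vxlan = { .local_port = 4789 };

	cached_tunnel_sk[0] = &vxlan;
	printf("hit:  %p\n", (void *)gro_lookup(0, 4789));
	printf("miss: %p\n", (void *)gro_lookup(0, 53));
	return 0;
}

The revert drops both mechanisms, so udp_gro_receive() goes back to the per-socket gro_receive pointer and the GRO lookup always takes the regular path.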
16 changes: 0 additions & 16 deletions include/linux/udp.h
@@ -101,13 +101,6 @@ struct udp_sock {

/* Cache friendly copy of sk->sk_peek_off >= 0 */
bool peeking_with_offset;

/*
* Accounting for the tunnel GRO fastpath.
* Unprotected by compilers guard, as it uses space available in
* the last UDP socket cacheline.
*/
struct hlist_node tunnel_list;
};

#define udp_test_bit(nr, sk) \
@@ -226,13 +219,4 @@ static inline void udp_allow_gso(struct sock *sk)

#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)

static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
{
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
return rcu_dereference(net->ipv4.udp_tunnel_gro[is_ipv6].sk);
#else
return NULL;
#endif
}

#endif /* _LINUX_UDP_H */
11 changes: 0 additions & 11 deletions include/net/netns/ipv4.h
@@ -47,11 +47,6 @@ struct sysctl_fib_multipath_hash_seed {
};
#endif

struct udp_tunnel_gro {
struct sock __rcu *sk;
struct hlist_head list;
};

struct netns_ipv4 {
/* Cacheline organization can be found documented in
* Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst.
@@ -90,11 +85,6 @@ struct netns_ipv4 {
struct inet_timewait_death_row tcp_death_row;
struct udp_table *udp_table;

#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
/* Not in a pernet subsys because need to be available at GRO stage */
struct udp_tunnel_gro udp_tunnel_gro[2];
#endif

#ifdef CONFIG_SYSCTL
struct ctl_table_header *forw_hdr;
struct ctl_table_header *frags_hdr;
@@ -287,5 +277,4 @@ struct netns_ipv4 {
struct hlist_head *inet_addr_lst;
struct delayed_work addr_chk_work;
};

#endif
1 change: 0 additions & 1 deletion include/net/udp.h
@@ -290,7 +290,6 @@ static inline void udp_lib_init_sock(struct sock *sk)
struct udp_sock *up = udp_sk(sk);

skb_queue_head_init(&up->reader_queue);
INIT_HLIST_NODE(&up->tunnel_list);
up->forward_threshold = sk->sk_rcvbuf >> 2;
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
}
22 changes: 0 additions & 22 deletions include/net/udp_tunnel.h
@@ -203,28 +203,6 @@ static inline void udp_tunnel_encap_enable(struct sock *sk)
udp_encap_enable();
}

#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
#else
static inline void udp_tunnel_update_gro_lookup(struct net *net,
struct sock *sk, bool add) {}
static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
#endif

static inline void udp_tunnel_cleanup_gro(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
struct net *net = sock_net(sk);

udp_tunnel_update_gro_rcv(sk, false);

if (!up->tunnel_list.pprev)
return;

udp_tunnel_update_gro_lookup(net, sk, false);
}

#define UDP_TUNNEL_NIC_MAX_TABLES 4

enum udp_tunnel_nic_info_flags {
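One detail worth noting in the removed udp_tunnel_cleanup_gro() above: the gro_receive type is always unregistered, but the lookup-list removal is skipped when tunnel_list was never hashed, detected by a NULL pprev (the same test hlist_unhashed() performs on an INIT_HLIST_NODE()-initialized node). A tiny standalone illustration of that idiom, using a made-up node type rather than the kernel's hlist:

/* The pprev-as-"am I hashed?" idiom used by the removed cleanup helper:
 * an initialized-but-unlinked node has pprev == NULL, so cleanup paths
 * can skip list removal for sockets that were never registered.
 */
#include <stdbool.h>
#include <stddef.h>

struct hnode {
	struct hnode *next;
	struct hnode **pprev;	/* points at whatever points at us */
};

void hnode_init(struct hnode *n)
{
	n->next = NULL;
	n->pprev = NULL;
}

bool hnode_unhashed(const struct hnode *n)
{
	return n->pprev == NULL;	/* never added to any list */
}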
13 changes: 1 addition & 12 deletions net/ipv4/udp.c
@@ -2891,10 +2891,8 @@ void udp_destroy_sock(struct sock *sk)
 			if (encap_destroy)
 				encap_destroy(sk);
 		}
-		if (udp_test_bit(ENCAP_ENABLED, sk)) {
+		if (udp_test_bit(ENCAP_ENABLED, sk))
 			static_branch_dec(&udp_encap_needed_key);
-			udp_tunnel_cleanup_gro(sk);
-		}
 	}
 }

@@ -3806,15 +3804,6 @@ static void __net_init udp_set_table(struct net *net)

static int __net_init udp_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
int i;

/* No tunnel is configured */
for (i = 0; i < ARRAY_SIZE(net->ipv4.udp_tunnel_gro); ++i) {
INIT_HLIST_HEAD(&net->ipv4.udp_tunnel_gro[i].list);
RCU_INIT_POINTER(net->ipv4.udp_tunnel_gro[i].sk, NULL);
}
#endif
udp_sysctl_init(net);
udp_set_table(net);

167 changes: 1 addition & 166 deletions net/ipv4/udp_offload.c
@@ -12,164 +12,6 @@
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
#include <net/udp_tunnel.h>

#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)

/*
* Dummy GRO tunnel callback, exists mainly to avoid dangling/NULL
* values for the udp tunnel static call.
*/
static struct sk_buff *dummy_gro_rcv(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}

typedef struct sk_buff *(*udp_tunnel_gro_rcv_t)(struct sock *sk,
struct list_head *head,
struct sk_buff *skb);

struct udp_tunnel_type_entry {
udp_tunnel_gro_rcv_t gro_receive;
refcount_t count;
};

#define UDP_MAX_TUNNEL_TYPES (IS_ENABLED(CONFIG_GENEVE) + \
IS_ENABLED(CONFIG_VXLAN) * 2 + \
IS_ENABLED(CONFIG_NET_FOU) * 2)

DEFINE_STATIC_CALL(udp_tunnel_gro_rcv, dummy_gro_rcv);
static DEFINE_STATIC_KEY_FALSE(udp_tunnel_static_call);
static struct mutex udp_tunnel_gro_type_lock;
static struct udp_tunnel_type_entry udp_tunnel_gro_types[UDP_MAX_TUNNEL_TYPES];
static unsigned int udp_tunnel_gro_type_nr;
static DEFINE_SPINLOCK(udp_tunnel_gro_lock);

void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
{
bool is_ipv6 = sk->sk_family == AF_INET6;
struct udp_sock *tup, *up = udp_sk(sk);
struct udp_tunnel_gro *udp_tunnel_gro;

spin_lock(&udp_tunnel_gro_lock);
udp_tunnel_gro = &net->ipv4.udp_tunnel_gro[is_ipv6];
if (add)
hlist_add_head(&up->tunnel_list, &udp_tunnel_gro->list);
else
hlist_del_init(&up->tunnel_list);

if (udp_tunnel_gro->list.first &&
!udp_tunnel_gro->list.first->next) {
tup = hlist_entry(udp_tunnel_gro->list.first, struct udp_sock,
tunnel_list);

rcu_assign_pointer(udp_tunnel_gro->sk, (struct sock *)tup);
} else {
RCU_INIT_POINTER(udp_tunnel_gro->sk, NULL);
}

spin_unlock(&udp_tunnel_gro_lock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_lookup);

void udp_tunnel_update_gro_rcv(struct sock *sk, bool add)
{
struct udp_tunnel_type_entry *cur = NULL;
struct udp_sock *up = udp_sk(sk);
int i, old_gro_type_nr;

if (!up->gro_receive)
return;

mutex_lock(&udp_tunnel_gro_type_lock);
for (i = 0; i < udp_tunnel_gro_type_nr; i++)
if (udp_tunnel_gro_types[i].gro_receive == up->gro_receive)
cur = &udp_tunnel_gro_types[i];

old_gro_type_nr = udp_tunnel_gro_type_nr;
if (add) {
/*
* Update the matching entry, if found, or add a new one
* if needed
*/
if (cur) {
refcount_inc(&cur->count);
goto out;
}

if (unlikely(udp_tunnel_gro_type_nr == UDP_MAX_TUNNEL_TYPES)) {
pr_err_once("Too many UDP tunnel types, please increase UDP_MAX_TUNNEL_TYPES\n");
/* Ensure static call will never be enabled */
udp_tunnel_gro_type_nr = UDP_MAX_TUNNEL_TYPES + 2;
goto out;
}

cur = &udp_tunnel_gro_types[udp_tunnel_gro_type_nr++];
refcount_set(&cur->count, 1);
cur->gro_receive = up->gro_receive;
} else {
/*
* The stack cleanups only successfully added tunnel, the
* lookup on removal should never fail.
*/
if (WARN_ON_ONCE(!cur))
goto out;

if (!refcount_dec_and_test(&cur->count))
goto out;

/* avoid gaps, so that the enable tunnel has always id 0 */
*cur = udp_tunnel_gro_types[--udp_tunnel_gro_type_nr];
}

if (udp_tunnel_gro_type_nr == 1) {
static_call_update(udp_tunnel_gro_rcv,
udp_tunnel_gro_types[0].gro_receive);
static_branch_enable(&udp_tunnel_static_call);
} else if (old_gro_type_nr == 1) {
static_branch_disable(&udp_tunnel_static_call);
static_call_update(udp_tunnel_gro_rcv, dummy_gro_rcv);
}

out:
mutex_unlock(&udp_tunnel_gro_type_lock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_rcv);

static void udp_tunnel_gro_init(void)
{
mutex_init(&udp_tunnel_gro_type_lock);
}

static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
if (static_branch_likely(&udp_tunnel_static_call)) {
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
return NULL;
}
return static_call(udp_tunnel_gro_rcv)(sk, head, skb);
}
return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
}

#else

static void udp_tunnel_gro_init(void) {}

static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
}

#endif

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
@@ -780,7 +622,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,

 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-	pp = udp_tunnel_gro_rcv(sk, head, skb);
+	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
 
 out:
 	skb_gro_flush_final(skb, pp, flush);
@@ -793,13 +635,8 @@ static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
{
const struct iphdr *iph = skb_gro_network_header(skb);
struct net *net = dev_net_rcu(skb->dev);
struct sock *sk;
int iif, sdif;

sk = udp_tunnel_sk(net, false);
if (sk && dport == htons(sk->sk_num))
return sk;

inet_get_iif_sdif(skb, &iif, &sdif);

return __udp4_lib_lookup(net, iph->saddr, sport,
@@ -930,7 +767,5 @@ int __init udpv4_offload_init(void)
.gro_complete = udp4_gro_complete,
},
};

udp_tunnel_gro_init();
return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}
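
The large block removed from net/ipv4/udp_offload.c above implemented the static-call half of the series: a small refcounted registry of tunnel gro_receive callbacks, with static_call_update()/static_branch_enable() engaged only while exactly one tunnel type is registered, and a dummy callback installed otherwise. static_call() and static branches are kernel facilities that patch the call site at runtime; the userspace sketch below models them with a plain function pointer and a boolean, and every name in it is invented for illustration.

/* Userspace model of the reverted static-call dispatch: track how many
 * distinct gro_receive callbacks are registered and, while there is
 * exactly one, call it directly instead of through the per-socket
 * pointer.  static_call()/static_branch are modeled with plain C here.
 */
#include <stdbool.h>
#include <stddef.h>

typedef void (*gro_rcv_t)(void *sk);

#define MAX_TUNNEL_TYPES 5

static struct {
	gro_rcv_t rcv;
	unsigned int refs;
} types[MAX_TUNNEL_TYPES];
static unsigned int nr_types;

/* "Patched" direct target, valid only while single_type is true. */
static gro_rcv_t direct_rcv;
static bool single_type;

static void update_dispatch(void)
{
	single_type = (nr_types == 1);
	direct_rcv = single_type ? types[0].rcv : NULL;
}

void register_tunnel_type(gro_rcv_t rcv)
{
	unsigned int i;

	for (i = 0; i < nr_types; i++) {
		if (types[i].rcv == rcv) {
			types[i].refs++;
			return;		/* distinct-type count unchanged */
		}
	}
	if (nr_types < MAX_TUNNEL_TYPES) {
		types[nr_types].rcv = rcv;
		types[nr_types].refs = 1;
		nr_types++;
	}
	update_dispatch();
}

void unregister_tunnel_type(gro_rcv_t rcv)
{
	unsigned int i;

	for (i = 0; i < nr_types; i++) {
		if (types[i].rcv == rcv && --types[i].refs == 0) {
			/* Keep the array gap-free, as the kernel code did. */
			types[i] = types[--nr_types];
			break;
		}
	}
	update_dispatch();
}

void tunnel_gro_rcv(void *sk, gro_rcv_t per_socket_rcv)
{
	if (single_type)
		direct_rcv(sk);		/* models static_call() */
	else
		per_socket_rcv(sk);	/* models call_gro_receive_sk() */
}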
14 changes: 0 additions & 14 deletions net/ipv4/udp_tunnel_core.c
@@ -58,15 +58,6 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
}
EXPORT_SYMBOL(udp_sock_create4);

static bool sk_saddr_any(struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#else
return !sk->sk_rcv_saddr;
#endif
}

void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
struct udp_tunnel_sock_cfg *cfg)
{
@@ -89,11 +80,6 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
udp_sk(sk)->gro_complete = cfg->gro_complete;

udp_tunnel_encap_enable(sk);

udp_tunnel_update_gro_rcv(sock->sk, true);

if (!sk->sk_dport && !sk->sk_bound_dev_if && sk_saddr_any(sock->sk))
udp_tunnel_update_gro_lookup(net, sock->sk, true);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);

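The hunk above also removes the registration side: setup_udp_tunnel_sock() always registered the socket's gro_receive type, but added the socket to the lookup fast path only when it could match traffic by destination port alone, i.e. when it was unconnected (no peer port), not bound to a device, and bound to the wildcard address (the removed sk_saddr_any() test). A minimal sketch of that eligibility check, with invented field names standing in for the real struct sock members:

/* Sketch of the eligibility test the revert removes: only a socket that
 * is unconnected, not device-bound, and bound to the wildcard address
 * may serve as the single cached GRO lookup target.  Field names are
 * illustrative, not the kernel's struct sock layout.
 */
#include <stdbool.h>
#include <stdint.h>

struct model_sock {
	uint16_t peer_port;	/* 0 when unconnected */
	int	 bound_ifindex;	/* 0 when not bound to a device */
	bool	 saddr_is_any;	/* wildcard local address */
};

bool eligible_for_gro_fastpath(const struct model_sock *sk)
{
	return !sk->peer_port && !sk->bound_ifindex && sk->saddr_is_any;
}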
2 changes: 0 additions & 2 deletions net/ipv6/udp.c
@@ -46,7 +46,6 @@
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@@ -1826,7 +1825,6 @@ void udpv6_destroy_sock(struct sock *sk)
if (udp_test_bit(ENCAP_ENABLED, sk)) {
static_branch_dec(&udpv6_encap_needed_key);
udp_encap_disable();
udp_tunnel_cleanup_gro(sk);
}
}
}
5 changes: 0 additions & 5 deletions net/ipv6/udp_offload.c
@@ -118,13 +118,8 @@ static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
{
const struct ipv6hdr *iph = skb_gro_network_header(skb);
struct net *net = dev_net_rcu(skb->dev);
struct sock *sk;
int iif, sdif;

sk = udp_tunnel_sk(net, true);
if (sk && dport == htons(sk->sk_num))
return sk;

inet6_get_iif_sdif(skb, &iif, &sdif);

return __udp6_lib_lookup(net, &iph->saddr, sport,
