Merge branch 'wireguard-fixes-for-5-12-rc1'
Jason Donenfeld says:

====================
wireguard fixes for 5.12-rc1

This series has a collection of fixes that have piled up for a little
while now, which I unfortunately didn't get a chance to send out earlier.

1) Removes unlikely() from IS_ERR(), since it's already implied.

2) Remove a bogus sparse annotation that hasn't been needed for years.

3) Additional test in the test suite for stressing parallel ndo_start_xmit.

4) Slight struct reordering in preparation for subsequent fix.

5) If skb->protocol is bogus, we no longer attempt to send ICMP messages.

6) Massive memory usage fix, hit by larger deployments; a rough sizing
   sketch follows the change summary below.

7) Fix typo in kconfig dependency logic.

(1) and (2) are tiny cleanups, and (3) is just a test, so if you're
trying to reduce churn, you could skip backporting those. But (4), (5),
(6), and (7) fix problems and should be applied to stable. IMO, it's
probably easiest to just apply them all to stable.
====================

Link: https://lore.kernel.org/r/20210222162549.3252778-1-Jason@zx2c4.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed Feb 23, 2021
2 parents fc0494e + bce2473 commit fcb3007
Showing 11 changed files with 170 additions and 105 deletions.
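For a sense of scale on fix (6): before this series, every peer preallocated two full ptr_rings, whether or not it was ever active. A rough, illustrative back-of-the-envelope (assuming MAX_QUEUED_PACKETS of 1024 and 8-byte pointers, as in this tree) shows how that adds up; exact numbers for any given deployment will differ:

#include <stdio.h>

int main(void)
{
	/* Assumptions: MAX_QUEUED_PACKETS == 1024, 64-bit pointers, and two
	 * preallocated rings (tx_queue and rx_queue) per peer, as before
	 * this series. That is 16 KiB per peer of always-resident slots.
	 */
	const unsigned long per_peer = 2UL * 1024 * sizeof(void *);

	for (unsigned long peers = 1000; peers <= 1000000; peers *= 10)
		printf("%7lu peers -> %5lu MiB of empty rings\n",
		       peers, peers * per_peer >> 20);
	return 0;
}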
2 changes: 1 addition & 1 deletion drivers/net/Kconfig
@@ -88,7 +88,7 @@ config WIREGUARD
 	select CRYPTO_CURVE25519_X86 if X86 && 64BIT
 	select ARM_CRYPTO if ARM
 	select ARM64_CRYPTO if ARM64
-	select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
+	select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
 	select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_POLY1305_ARM if ARM
 	select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
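The grouping change above matters because && binds tighter than || in Kconfig expressions, just as in C. Modeled as C booleans (an illustration only, not code from the tree), the one case where the two expressions differ is 32-bit ARM without KERNEL_MODE_NEON, where the ARM ChaCha implementation still provides a usable scalar path:

#include <stdbool.h>
#include <stdio.h>

static bool old_sel(bool arm, bool arm64, bool neon) { return (arm || arm64) && neon; }
static bool new_sel(bool arm, bool arm64, bool neon) { return arm || (arm64 && neon); }

int main(void)
{
	/* 32-bit ARM with no NEON: the old grouping wrongly skipped the select. */
	printf("old=%d new=%d\n",
	       old_sel(true, false, false), new_sel(true, false, false));
	return 0;	/* prints: old=0 new=1 */
}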
21 changes: 11 additions & 10 deletions drivers/net/wireguard/device.c
@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 		else if (skb->protocol == htons(ETH_P_IPV6))
 			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
 					    dev->name, &ipv6_hdr(skb)->daddr);
-		goto err;
+		goto err_icmp;
 	}
 
 	family = READ_ONCE(peer->endpoint.addr.sa_family);
@@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		struct sk_buff *segs = skb_gso_segment(skb, 0);
 
-		if (unlikely(IS_ERR(segs))) {
+		if (IS_ERR(segs)) {
 			ret = PTR_ERR(segs);
 			goto err_peer;
 		}
@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 
 err_peer:
 	wg_peer_put(peer);
-err:
-	++dev->stats.tx_errors;
+err_icmp:
 	if (skb->protocol == htons(ETH_P_IP))
 		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 	else if (skb->protocol == htons(ETH_P_IPV6))
 		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+err:
+	++dev->stats.tx_errors;
 	kfree_skb(skb);
 	return ret;
 }
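Read linearly, the hunk above leaves the tail of wg_xmit() looking like this (reconstructed from the diff). Failures that occur before skb->protocol has been validated, such as the bogus-protocol check earlier in the function (not shown in this hunk), jump straight to err, so an ICMP reply is never synthesized from an untrusted protocol field; the no-peer and no-endpoint paths still jump to err_icmp and get their unreachable notifications:

err_peer:
	wg_peer_put(peer);
err_icmp:
	/* Only reached on paths where skb->protocol was already validated. */
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
	++dev->stats.tx_errors;
	kfree_skb(skb);
	return ret;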
@@ -234,8 +235,8 @@ static void wg_destruct(struct net_device *dev)
 	destroy_workqueue(wg->handshake_receive_wq);
 	destroy_workqueue(wg->handshake_send_wq);
 	destroy_workqueue(wg->packet_crypt_wq);
-	wg_packet_queue_free(&wg->decrypt_queue, true);
-	wg_packet_queue_free(&wg->encrypt_queue, true);
+	wg_packet_queue_free(&wg->decrypt_queue);
+	wg_packet_queue_free(&wg->encrypt_queue);
 	rcu_barrier(); /* Wait for all the peers to be actually freed. */
 	wg_ratelimiter_uninit();
 	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
@@ -337,12 +338,12 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 		goto err_destroy_handshake_send;
 
 	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
-				   true, MAX_QUEUED_PACKETS);
+				   MAX_QUEUED_PACKETS);
 	if (ret < 0)
 		goto err_destroy_packet_crypt;
 
 	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
-				   true, MAX_QUEUED_PACKETS);
+				   MAX_QUEUED_PACKETS);
 	if (ret < 0)
 		goto err_free_encrypt_queue;
 
@@ -367,9 +368,9 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 err_uninit_ratelimiter:
 	wg_ratelimiter_uninit();
 err_free_decrypt_queue:
-	wg_packet_queue_free(&wg->decrypt_queue, true);
+	wg_packet_queue_free(&wg->decrypt_queue);
 err_free_encrypt_queue:
-	wg_packet_queue_free(&wg->encrypt_queue, true);
+	wg_packet_queue_free(&wg->encrypt_queue);
 err_destroy_packet_crypt:
 	destroy_workqueue(wg->packet_crypt_wq);
 err_destroy_handshake_send:
15 changes: 8 additions & 7 deletions drivers/net/wireguard/device.h
@@ -27,13 +27,14 @@ struct multicore_worker {
 
 struct crypt_queue {
 	struct ptr_ring ring;
-	union {
-		struct {
-			struct multicore_worker __percpu *worker;
-			int last_cpu;
-		};
-		struct work_struct work;
-	};
+	struct multicore_worker __percpu *worker;
+	int last_cpu;
+};
+
+struct prev_queue {
+	struct sk_buff *head, *tail, *peeked;
+	struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff.
+	atomic_t count;
 };
 
 struct wg_device {
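A note on the new prev_queue: the empty member deliberately mirrors the first two members of struct sk_buff, so &queue->empty can be cast to a struct sk_buff * and used as the list's resident sentinel (stub) node with no allocation. A standalone model of that aliasing trick, with invented names for illustration:

#include <stddef.h>

struct fake_skb {
	struct fake_skb *next, *prev;	/* mirrors sk_buff's first two members */
	/* ... the rest of the real struct is irrelevant to the stub ... */
};

struct fake_prev_queue {
	struct fake_skb *head, *tail, *peeked;
	struct { struct fake_skb *next, *prev; } empty;	/* the stub */
};

/* Casting &q->empty to struct fake_skb * is only sound if the layouts
 * line up. The kernel asserts this with BUILD_BUG_ON in queueing.c;
 * the same check in C11 userspace:
 */
_Static_assert(offsetof(struct fake_skb, next) ==
	       offsetof(struct fake_prev_queue, empty.next) -
	       offsetof(struct fake_prev_queue, empty), "stub must alias next");
_Static_assert(offsetof(struct fake_skb, prev) ==
	       offsetof(struct fake_prev_queue, empty.prev) -
	       offsetof(struct fake_prev_queue, empty), "stub must alias prev");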
28 changes: 9 additions & 19 deletions drivers/net/wireguard/peer.c
@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
 	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
 	if (unlikely(!peer))
 		return ERR_PTR(ret);
-	peer->device = wg;
+	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+		goto err;
 
+	peer->device = wg;
 	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
 				public_key, preshared_key, peer);
-	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
-		goto err_1;
-	if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
-				 MAX_QUEUED_PACKETS))
-		goto err_2;
-	if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
-				 MAX_QUEUED_PACKETS))
-		goto err_3;
-
 	peer->internal_id = atomic64_inc_return(&peer_counter);
 	peer->serial_work_cpu = nr_cpumask_bits;
 	wg_cookie_init(&peer->latest_cookie);
 	wg_timers_init(peer);
 	wg_cookie_checker_precompute_peer_keys(peer);
 	spin_lock_init(&peer->keypairs.keypair_update_lock);
-	INIT_WORK(&peer->transmit_handshake_work,
-		  wg_packet_handshake_send_worker);
+	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
+	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
+	wg_prev_queue_init(&peer->tx_queue);
+	wg_prev_queue_init(&peer->rx_queue);
 	rwlock_init(&peer->endpoint_lock);
 	kref_init(&peer->refcount);
 	skb_queue_head_init(&peer->staged_packet_queue);
@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
 	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
 	return peer;
 
-err_3:
-	wg_packet_queue_free(&peer->tx_queue, false);
-err_2:
-	dst_cache_destroy(&peer->endpoint_cache);
-err_1:
+err:
 	kfree(peer);
 	return ERR_PTR(ret);
 }
@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head *rcu)
 	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
 
 	dst_cache_destroy(&peer->endpoint_cache);
-	wg_packet_queue_free(&peer->rx_queue, false);
-	wg_packet_queue_free(&peer->tx_queue, false);
+	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));
 
 	/* The final zeroing takes care of clearing any remaining handshake key
 	 * material and other potentially sensitive information.
8 changes: 4 additions & 4 deletions drivers/net/wireguard/peer.h
@@ -36,16 +36,17 @@ struct endpoint {
 
 struct wg_peer {
 	struct wg_device *device;
-	struct crypt_queue tx_queue, rx_queue;
+	struct prev_queue tx_queue, rx_queue;
 	struct sk_buff_head staged_packet_queue;
 	int serial_work_cpu;
+	bool is_dead;
 	struct noise_keypairs keypairs;
 	struct endpoint endpoint;
 	struct dst_cache endpoint_cache;
 	rwlock_t endpoint_lock;
 	struct noise_handshake handshake;
 	atomic64_t last_sent_handshake;
-	struct work_struct transmit_handshake_work, clear_peer_work;
+	struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work;
 	struct cookie latest_cookie;
 	struct hlist_node pubkey_hash;
 	u64 rx_bytes, tx_bytes;
@@ -61,9 +62,8 @@ struct wg_peer {
 	struct rcu_head rcu;
 	struct list_head peer_list;
 	struct list_head allowedips_list;
-	u64 internal_id;
 	struct napi_struct napi;
-	bool is_dead;
+	u64 internal_id;
 };
 
 struct wg_peer *wg_peer_create(struct wg_device *wg,
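Two separate changes from the series meet in this file: the member reordering (is_dead moving up beside serial_work_cpu, internal_id moving below napi) appears to be item (4) from the cover letter, keeping members the hot data path touches together in the leading cache lines, while the prev_queue types and the new transmit_packet_work come from item (6). To inspect the resulting offsets and cacheline boundaries yourself, pahole works well; a plausible invocation (module path assumed) is:

	pahole -C wg_peer drivers/net/wireguard/wireguard.ko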
86 changes: 69 additions & 17 deletions drivers/net/wireguard/queueing.c
@@ -9,8 +9,7 @@ struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 {
 	int cpu;
-	struct multicore_worker __percpu *worker =
-		alloc_percpu(struct multicore_worker);
+	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
 
 	if (!worker)
 		return NULL;
@@ -23,33 +22,86 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 }
 
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len)
+			 unsigned int len)
 {
 	int ret;
 
 	memset(queue, 0, sizeof(*queue));
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
-	if (function) {
-		if (multicore) {
-			queue->worker = wg_packet_percpu_multicore_worker_alloc(
-				function, queue);
-			if (!queue->worker) {
-				ptr_ring_cleanup(&queue->ring, NULL);
-				return -ENOMEM;
-			}
-		} else {
-			INIT_WORK(&queue->work, function);
-		}
+	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
+	if (!queue->worker) {
+		ptr_ring_cleanup(&queue->ring, NULL);
+		return -ENOMEM;
 	}
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue)
 {
-	if (multicore)
-		free_percpu(queue->worker);
+	free_percpu(queue->worker);
 	WARN_ON(!__ptr_ring_empty(&queue->ring));
 	ptr_ring_cleanup(&queue->ring, NULL);
 }
+
+#define NEXT(skb) ((skb)->prev)
+#define STUB(queue) ((struct sk_buff *)&queue->empty)
+
+void wg_prev_queue_init(struct prev_queue *queue)
+{
+	NEXT(STUB(queue)) = NULL;
+	queue->head = queue->tail = STUB(queue);
+	queue->peeked = NULL;
+	atomic_set(&queue->count, 0);
+	BUILD_BUG_ON(
+		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
+							offsetof(struct prev_queue, empty) ||
+		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
+							 offsetof(struct prev_queue, empty));
+}
+
+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	WRITE_ONCE(NEXT(skb), NULL);
+	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
+}
+
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
+		return false;
+	__wg_prev_queue_enqueue(queue, skb);
+	return true;
+}
+
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
+{
+	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
+
+	if (tail == STUB(queue)) {
+		if (!next)
+			return NULL;
+		queue->tail = next;
+		tail = next;
+		next = smp_load_acquire(&NEXT(next));
+	}
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	if (tail != READ_ONCE(queue->head))
+		return NULL;
+	__wg_prev_queue_enqueue(queue, STUB(queue));
+	next = smp_load_acquire(&NEXT(tail));
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	return NULL;
+}
+
+#undef NEXT
+#undef STUB
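These additions are the heart of fix (6). Instead of a preallocated 1024-slot ring per peer, each peer now has an intrusive Vyukov-style MPSC queue: the links live inside the skbs themselves (NEXT() borrows skb->prev), so an idle peer costs only the small struct prev_queue. Producers contend only on an xchg of head; the lone consumer advances tail. As a sanity check of the algorithm's shape, here is a minimal single-threaded userspace model, with invented names, plain pointers standing in for the kernel's acquire/release atomics, and the count cap omitted; it is not the kernel code:

#include <assert.h>
#include <stdio.h>

struct node { struct node *next; };

struct mpsc {
	struct node *head, *tail;	/* head: producers; tail: consumer */
	struct node stub;		/* resident sentinel, like prev_queue's empty */
};

static void mpsc_init(struct mpsc *q)
{
	q->stub.next = NULL;
	q->head = q->tail = &q->stub;
}

static void mpsc_enqueue(struct mpsc *q, struct node *n)
{
	struct node *prev;

	n->next = NULL;
	prev = q->head;		/* kernel: xchg_release(&queue->head, skb) */
	q->head = n;
	prev->next = n;		/* kernel: WRITE_ONCE(NEXT(prev), skb) */
}

static struct node *mpsc_dequeue(struct mpsc *q)
{
	struct node *tail = q->tail, *next = tail->next;

	if (tail == &q->stub) {		/* step over the sentinel */
		if (!next)
			return NULL;	/* queue is empty */
		q->tail = tail = next;
		next = next->next;
	}
	if (next) {
		q->tail = next;
		return tail;
	}
	if (tail != q->head)
		return NULL;	/* a producer is mid-enqueue; retry later */
	mpsc_enqueue(q, &q->stub);	/* recycle the sentinel behind us */
	next = tail->next;
	if (next) {
		q->tail = next;
		return tail;
	}
	return NULL;
}

int main(void)
{
	struct mpsc q;
	struct node a, b;

	mpsc_init(&q);
	mpsc_enqueue(&q, &a);
	mpsc_enqueue(&q, &b);
	assert(mpsc_dequeue(&q) == &a);
	assert(mpsc_dequeue(&q) == &b);
	assert(!mpsc_dequeue(&q));
	puts("ok");
	return 0;
}

The kernel version additionally bounds the queue with an atomic count (wg_prev_queue_enqueue fails once MAX_QUEUED_PACKETS entries are in flight) and uses smp_load_acquire/xchg_release so the producer-to-consumer handoff is ordered across CPUs.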
45 changes: 33 additions & 12 deletions drivers/net/wireguard/queueing.h
@@ -17,12 +17,13 @@ struct wg_device;
 struct wg_peer;
 struct multicore_worker;
 struct crypt_queue;
+struct prev_queue;
 struct sk_buff;
 
 /* queueing.c APIs: */
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+			 unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue);
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
 
@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online(int *next)
 	return cpu;
 }
 
+void wg_prev_queue_init(struct prev_queue *queue);
+
+/* Multi producer */
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
+
+/* Single consumer */
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
+
+/* Single consumer */
+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
+{
+	if (queue->peeked)
+		return queue->peeked;
+	queue->peeked = wg_prev_queue_dequeue(queue);
+	return queue->peeked;
+}
+
+/* Single consumer */
+static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
+{
+	queue->peeked = NULL;
+}
+
 static inline int wg_queue_enqueue_per_device_and_peer(
-	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
 	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
 {
 	int cpu;
@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_device_and_peer(
 	/* We first queue this up for the peer ingestion, but the consumer
 	 * will wait for the state to change to CRYPTED or DEAD before.
 	 */
-	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
 		return -ENOSPC;
+
 	/* Then we queue it up in the device queue, which consumes the
 	 * packet as soon as it can.
 	 */
@@ -157,24 +182,20 @@ static inline int wg_queue_enqueue_per_device_and_peer(
 	return 0;
 }
 
-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
-					     struct sk_buff *skb,
-					     enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
 {
 	/* We take a reference, because as soon as we call atomic_set, the
 	 * peer can be freed from below us.
 	 */
 	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
 
 	atomic_set_release(&PACKET_CB(skb)->state, state);
-	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
-					       peer->internal_id),
-		      peer->device->packet_crypt_wq, &queue->work);
+	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
+		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
 	wg_peer_put(peer);
 }
 
-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
-						  enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
 {
 	/* We take a reference, because as soon as we call atomic_set, the
 	 * peer can be freed from below us.
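The peek/drop_peeked pair above exists because the single consumer must leave a packet queued until its crypto state says progress is possible. A sketch of the intended consumer shape, loosely modeled on how this series' rx path drains per-peer queues (simplified, not a verbatim excerpt):

/* Hypothetical consumer: peek leaves the skb queued; only once we can
 * make progress is the dequeue committed with drop_peeked.
 */
static void wg_consume_ready(struct wg_peer *peer)
{
	struct sk_buff *skb;

	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL) {
		enum packet_state state =
			atomic_read_acquire(&PACKET_CB(skb)->state);

		if (state == PACKET_STATE_UNCRYPTED)
			break;	/* still being decrypted in the workqueue */
		wg_prev_queue_drop_peeked(&peer->rx_queue);
		/* ... deliver or drop skb according to state ... */
	}
}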
