Merge branch 'af_unix-fix-lockless-access-of-sk-sk_state-and-others-fields'

Kuniyuki Iwashima says:

====================
af_unix: Fix lockless access of sk->sk_state and other fields.

Patch 1 fixes a bug where a SOCK_DGRAM socket's sk->sk_state is changed
to TCP_CLOSE even if the socket is still connect()ed to another socket.

The rest of this series annotates lockless accesses to the following
fields.

  * sk->sk_state
  * sk->sk_sndbuf
  * net->unx.sysctl_max_dgram_qlen
  * sk->sk_receive_queue.qlen
  * sk->sk_shutdown

Note that even with this series applied, unix_dgram_disconnected() still
contains an skb_queue_empty() call that needs to be converted to the
lockless version, and the unix_peer(other) access there should be
protected by unix_state_lock().

This will require some refactoring, so another series will follow.

Changes:
  v2:
    * Patch 1: Fix wrong double lock

  v1: https://lore.kernel.org/netdev/20240603143231.62085-1-kuniyu@amazon.com/
====================

Link: https://lore.kernel.org/r/20240604165241.44758-1-kuniyu@amazon.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Paolo Abeni committed Jun 6, 2024
2 parents b0c9a26 + efaf24e commit 411c0ea
Showing 2 changed files with 50 additions and 52 deletions.
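
The pattern applied throughout both files below pairs WRITE_ONCE() at every locked writer with READ_ONCE() at every lockless reader, so the compiler can neither tear nor re-load the access and KCSAN treats the race as intentional. What follows is a minimal userspace sketch of that idea, not kernel code: the simplified READ_ONCE()/WRITE_ONCE() macros and the fake_sock type are illustrative stand-ins for the kernel's definitions.

/* Userspace sketch of the annotation pattern; not kernel code. */
#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros in <asm-generic/rwonce.h>. */
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

#define TCP_ESTABLISHED 1	/* values mirror include/net/tcp_states.h */
#define TCP_CLOSE       7

struct fake_sock {			/* hypothetical, for illustration */
	pthread_mutex_t lock;		/* stands in for unix_state_lock() */
	unsigned char sk_state;
};

/* Writer side: holds the lock, but still annotates the store because
 * lockless readers exist elsewhere. */
static void set_established(struct fake_sock *sk)
{
	pthread_mutex_lock(&sk->lock);
	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
	pthread_mutex_unlock(&sk->lock);
}

/* Reader side: e.g. a poll()-like path that must not take the lock;
 * READ_ONCE() keeps the compiler from tearing or re-reading the load. */
static int is_established(struct fake_sock *sk)
{
	return READ_ONCE(sk->sk_state) == TCP_ESTABLISHED;
}

int main(void)
{
	struct fake_sock sk = { PTHREAD_MUTEX_INITIALIZER, TCP_CLOSE };

	set_established(&sk);
	printf("established: %d\n", is_established(&sk));
	return 0;
}
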
90 changes: 44 additions & 46 deletions net/unix/af_unix.c
@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 }
 
-static inline int unix_recvq_full(const struct sock *sk)
-{
-	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
-}
-
 static inline int unix_recvq_full_lockless(const struct sock *sk)
 {
-	return skb_queue_len_lockless(&sk->sk_receive_queue) >
-	       READ_ONCE(sk->sk_max_ack_backlog);
+	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
 struct sock *unix_peer_get(struct sock *s)
@@ -530,18 +524,18 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
 	return 0;
 }
 
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
 {
-	return sk->sk_state != TCP_LISTEN &&
-	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+	return state != TCP_LISTEN &&
+	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
 }
 
 static void unix_write_space(struct sock *sk)
 {
 	struct socket_wq *wq;
 
 	rcu_read_lock();
-	if (unix_writable(sk)) {
+	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait,
@@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 			sk_error_report(other);
 		}
 	}
-	other->sk_state = TCP_CLOSE;
 }
 
 static void unix_sock_destructor(struct sock *sk)
@@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 	u->path.dentry = NULL;
 	u->path.mnt = NULL;
 	state = sk->sk_state;
-	sk->sk_state = TCP_CLOSE;
+	WRITE_ONCE(sk->sk_state, TCP_CLOSE);
 
 	skpair = unix_peer(sk);
 	unix_peer(sk) = NULL;
@@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 			unix_state_lock(skpair);
 			/* No more writes */
 			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
-			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
 				WRITE_ONCE(skpair->sk_err, ECONNRESET);
 			unix_state_unlock(skpair);
 			skpair->sk_state_change(skpair);
@@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)
 	if (backlog > sk->sk_max_ack_backlog)
 		wake_up_interruptible_all(&u->peer_wait);
 	sk->sk_max_ack_backlog = backlog;
-	sk->sk_state = TCP_LISTEN;
+	WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
 	/* set credentials so connect can copy them */
 	init_peercred(sk);
 	err = 0;
@@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
 	sk->sk_hash = unix_unbound_hash(sk);
 	sk->sk_allocation = GFP_KERNEL_ACCOUNT;
 	sk->sk_write_space = unix_write_space;
-	sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+	sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
 	sk->sk_destruct = unix_sock_destructor;
 	u = unix_sk(sk);
 	u->listener = NULL;
@@ -1402,7 +1396,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
 		if (err)
 			goto out_unlock;
 
-		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+		WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
 	} else {
 		/*
 		 * 1003.1g breaking connected state with AF_UNSPEC
@@ -1419,13 +1414,20 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
 
 		unix_peer(sk) = other;
 		if (!other)
-			sk->sk_state = TCP_CLOSE;
+			WRITE_ONCE(sk->sk_state, TCP_CLOSE);
 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
 
 		unix_state_double_unlock(sk, other);
 
-		if (other != old_peer)
+		if (other != old_peer) {
 			unix_dgram_disconnected(sk, old_peer);
+
+			unix_state_lock(old_peer);
+			if (!unix_peer(old_peer))
+				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+			unix_state_unlock(old_peer);
+		}
+
 		sock_put(old_peer);
 	} else {
 		unix_peer(sk) = other;
@@ -1473,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 	struct sk_buff *skb = NULL;
 	long timeo;
 	int err;
-	int st;
 
 	err = unix_validate_addr(sunaddr, addr_len);
 	if (err)
@@ -1538,7 +1539,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 	if (other->sk_shutdown & RCV_SHUTDOWN)
 		goto out_unlock;
 
-	if (unix_recvq_full(other)) {
+	if (unix_recvq_full_lockless(other)) {
 		err = -EAGAIN;
 		if (!timeo)
 			goto out_unlock;
@@ -1563,9 +1564,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 	   Well, and we have to recheck the state after socket locked.
 	 */
-	st = sk->sk_state;
-
-	switch (st) {
+	switch (READ_ONCE(sk->sk_state)) {
 	case TCP_CLOSE:
 		/* This is ok... continue with connect */
 		break;
@@ -1580,7 +1579,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 
 	unix_state_lock_nested(sk, U_LOCK_SECOND);
 
-	if (sk->sk_state != st) {
+	if (sk->sk_state != TCP_CLOSE) {
 		unix_state_unlock(sk);
 		unix_state_unlock(other);
 		sock_put(other);
@@ -1633,7 +1632,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 	copy_peercred(sk, other);
 
 	sock->state = SS_CONNECTED;
-	sk->sk_state = TCP_ESTABLISHED;
+	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
 	sock_hold(newsk);
 
 	smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1705,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock,
 		goto out;
 
 	arg->err = -EINVAL;
-	if (sk->sk_state != TCP_LISTEN)
+	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
 		goto out;
 
 	/* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1962,7 +1961,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
 	}
 
 	err = -EMSGSIZE;
-	if (len > sk->sk_sndbuf - 32)
+	if (len > READ_ONCE(sk->sk_sndbuf) - 32)
 		goto out;
 
 	if (len > SKB_MAX_ALLOC) {
@@ -2044,7 +2043,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
 		unix_peer(sk) = NULL;
 		unix_dgram_peer_wake_disconnect_wakeup(sk, other);
 
-		sk->sk_state = TCP_CLOSE;
+		WRITE_ONCE(sk->sk_state, TCP_CLOSE);
 		unix_state_unlock(sk);
 
 		unix_dgram_disconnected(sk, other);
@@ -2221,7 +2220,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 	}
 
 	if (msg->msg_namelen) {
-		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
 		goto out_err;
 	} else {
 		err = -ENOTCONN;
@@ -2242,7 +2241,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 				   &err, 0);
 	} else {
 		/* Keep two messages in the pipe so it schedules better */
-		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+		size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
 
 		/* allow fallback to order-0 allocations */
 		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2335,7 +2334,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (err)
 		return err;
 
-	if (sk->sk_state != TCP_ESTABLISHED)
+	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
 		return -ENOTCONN;
 
 	if (msg->msg_namelen)
@@ -2349,7 +2348,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
 {
 	struct sock *sk = sock->sk;
 
-	if (sk->sk_state != TCP_ESTABLISHED)
+	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
 		return -ENOTCONN;
 
 	return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2654,7 +2653,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 
 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
-	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
 		return -ENOTCONN;
 
 	return unix_read_skb(sk, recv_actor);
@@ -2678,7 +2677,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
 	size_t size = state->size;
 	unsigned int last_len;
 
-	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -3009,7 +3008,7 @@ long unix_inq_len(struct sock *sk)
 	struct sk_buff *skb;
 	long amount = 0;
 
-	if (sk->sk_state == TCP_LISTEN)
+	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
 		return -EINVAL;
 
 	spin_lock(&sk->sk_receive_queue.lock);
@@ -3121,12 +3120,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
 	struct sock *sk = sock->sk;
+	unsigned char state;
 	__poll_t mask;
 	u8 shutdown;
 
 	sock_poll_wait(file, sock, wait);
 	mask = 0;
 	shutdown = READ_ONCE(sk->sk_shutdown);
+	state = READ_ONCE(sk->sk_state);
 
 	/* exceptional events? */
 	if (READ_ONCE(sk->sk_err))
@@ -3148,14 +3149,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
 
 	/* Connection-based need to check for termination and startup */
 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
-	    sk->sk_state == TCP_CLOSE)
+	    state == TCP_CLOSE)
 		mask |= EPOLLHUP;
 
 	/*
 	 * we set writable also when the other side has shut down the
 	 * connection. This prevents stuck sockets.
 	 */
-	if (unix_writable(sk))
+	if (unix_writable(sk, state))
 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
 	return mask;
@@ -3166,12 +3167,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 {
 	struct sock *sk = sock->sk, *other;
 	unsigned int writable;
+	unsigned char state;
 	__poll_t mask;
 	u8 shutdown;
 
 	sock_poll_wait(file, sock, wait);
 	mask = 0;
 	shutdown = READ_ONCE(sk->sk_shutdown);
+	state = READ_ONCE(sk->sk_state);
 
 	/* exceptional events? */
 	if (READ_ONCE(sk->sk_err) ||
@@ -3191,19 +3194,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
-	if (sk->sk_type == SOCK_SEQPACKET) {
-		if (sk->sk_state == TCP_CLOSE)
-			mask |= EPOLLHUP;
-		/* connection hasn't started yet? */
-		if (sk->sk_state == TCP_SYN_SENT)
-			return mask;
-	}
+	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+		mask |= EPOLLHUP;
 
 	/* No write status requested, avoid expensive OUT tests. */
 	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
 		return mask;
 
-	writable = unix_writable(sk);
+	writable = unix_writable(sk, state);
 	if (writable) {
 		unix_state_lock(sk);
 
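
Both poll hunks above share one design point: sk->sk_state is sampled exactly once with READ_ONCE() into a local state variable, and the EPOLLHUP and writability tests then use that single snapshot, so one poll invocation cannot observe two different states. Below is a hedged userspace sketch of that sample-once pattern; fake_sock, fake_poll, and the mask bits are hypothetical, and only the snapshot idea mirrors the patches.

/* Userspace sketch of the "sample once, test many times" pattern from
 * the poll hunks above; all names here are made up for illustration. */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

#define TCP_ESTABLISHED 1	/* values mirror include/net/tcp_states.h */
#define TCP_CLOSE       7
#define TCP_LISTEN      10

struct fake_sock {
	unsigned char sk_state;	/* written under a lock elsewhere */
};

static unsigned int fake_poll(const struct fake_sock *sk)
{
	/* One annotated racy read; every check below agrees on it. */
	unsigned char state = READ_ONCE(sk->sk_state);
	unsigned int mask = 0;

	if (state == TCP_CLOSE)
		mask |= 0x10;		/* EPOLLHUP-like bit */
	if (state != TCP_LISTEN)	/* writability check, same snapshot */
		mask |= 0x04;		/* EPOLLOUT-like bit */

	return mask;
}

int main(void)
{
	struct fake_sock sk = { .sk_state = TCP_ESTABLISHED };

	printf("mask = 0x%x\n", fake_poll(&sk));
	return 0;
}
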
12 changes: 6 additions & 6 deletions net/unix/diag.c
@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
 	u32 *buf;
 	int i;
 
-	if (sk->sk_state == TCP_LISTEN) {
+	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
 		spin_lock(&sk->sk_receive_queue.lock);
 
 		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct unix_diag_rqlen rql;
 
-	if (sk->sk_state == TCP_LISTEN) {
-		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+		rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
 		rql.udiag_wqueue = sk->sk_max_ack_backlog;
 	} else {
 		rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
 	rep = nlmsg_data(nlh);
 	rep->udiag_family = AF_UNIX;
 	rep->udiag_type = sk->sk_type;
-	rep->udiag_state = sk->sk_state;
+	rep->udiag_state = READ_ONCE(sk->sk_state);
 	rep->pad = 0;
 	rep->udiag_ino = sk_ino;
 	sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
 		goto out_nlmsg_trim;
 
-	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
 		goto out_nlmsg_trim;
 
 	if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		sk_for_each(sk, &net->unx.table.buckets[slot]) {
 			if (num < s_num)
 				goto next;
-			if (!(req->udiag_states & (1 << sk->sk_state)))
+			if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
 				goto next;
 			if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
 					 NETLINK_CB(cb->skb).portid,
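
One detail of the sk_diag_show_rqlen() hunk above: skb_queue_len_lockless() is, in current kernels, essentially a READ_ONCE() of the queue's qlen field (see include/linux/skbuff.h), which is why it can report a listener's receive-queue length without taking the queue lock. A minimal userspace sketch follows, with the hypothetical fake_skb_queue standing in for struct sk_buff_head.

/* Userspace sketch of a lockless queue-length read; fake_skb_queue is a
 * made-up stand-in for the kernel's struct sk_buff_head. */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct fake_skb_queue {
	unsigned int qlen;	/* updated under the queue lock by enqueuers */
};

/* Mirrors the shape of skb_queue_len_lockless(): a single annotated
 * load, possibly racing with concurrent enqueue/dequeue. */
static unsigned int queue_len_lockless(const struct fake_skb_queue *q)
{
	return READ_ONCE(q->qlen);
}

int main(void)
{
	struct fake_skb_queue q = { .qlen = 3 };

	printf("qlen = %u\n", queue_len_lockless(&q));
	return 0;
}
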
