Skip to content

Commit

Permalink
Merge branch 'mptcp-rx-path-fixes'
Browse files Browse the repository at this point in the history
Matthieu Baerts says:

====================
mptcp: rx path fixes

Here are 3 different fixes, all related to the MPTCP receive buffer:

- Patch 1: fix receive buffer space when recvmsg() blocks after
  receiving some data. This repairs a fix that was introduced in v6.12
  and backported as far back as v6.1.

- Patch 2: avoid calling mptcp_cleanup_rbuf() when no data has been
  copied. Affects kernels back to 5.11.

- Patch 3: prevent excessive coalescing on receive, which can affect the
  throughput badly. It looks better to wait a bit before backporting
  this one to stable versions, to get more results. For 5.10.
====================

Link: https://patch.msgid.link/20241230-net-mptcp-rbuf-fixes-v1-0-8608af434ceb@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
  • Loading branch information
Jakub Kicinski committed Jan 3, 2025
2 parents 260466b + 56b824e commit 3473020
Showing 1 changed file with 12 additions and 11 deletions.
23 changes: 12 additions & 11 deletions net/mptcp/protocol.c
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
int delta;

if (MPTCP_SKB_CB(from)->offset ||
((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
!skb_try_coalesce(to, from, &fragstolen, &delta))
return false;

Expand Down Expand Up @@ -528,13 +529,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}

/* NOTE(review): this span comes from a diff view with no +/- markers, so
 * both the pre-change signature (no 'copied') and the post-change
 * signature (with 'copied') appear back to back; only one exists in the
 * real file. */
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
/*
 * Clean up the subflow's TCP receive buffer under the fast socket lock,
 * letting TCP send a pending ACK if one can be sent.
 * @ssk: the subflow's TCP socket
 * @copied: number of bytes the caller actually copied to userspace;
 *          forwarded to tcp_cleanup_rbuf() so ACK/window decisions are
 *          based on real progress rather than a constant.
 */
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
bool slow;

slow = lock_sock_fast(ssk);
if (tcp_can_send_ack(ssk))
/* old call hard-coded 1 copied byte; new call forwards 'copied' */
tcp_cleanup_rbuf(ssk, 1);
tcp_cleanup_rbuf(ssk, copied);
unlock_sock_fast(ssk, slow);
}

Expand All @@ -551,22 +552,22 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
(ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}

/* NOTE(review): diff view without +/- markers — old and new versions of
 * the signature and of the 'cleanup'/'rx_empty' computations are shown
 * consecutively; only the 'copied'-aware variants exist after the change. */
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
/*
 * Walk every subflow of @msk and clean up its receive buffer when either
 * the MPTCP-level receive window grew enough or the receive queue drained.
 * @msk: the MPTCP socket
 * @copied: bytes copied to userspace by the caller; per the commit
 *          message, gating on this avoids doing cleanup (and sending
 *          ACKs) when no data has actually been consumed.
 */
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
int old_space = READ_ONCE(msk->old_wspace);
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
int space = __mptcp_space(sk);
bool cleanup, rx_empty;

/* old: cleanup whenever the window at least doubled;
 * new: additionally require that some data was copied */
cleanup = (space > 0) && (space >= (old_space << 1));
/* old: rx_empty tracked only queue emptiness;
 * new: also require copied != 0 before treating "empty" as actionable */
rx_empty = !__mptcp_rmem(sk);
cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
rx_empty = !__mptcp_rmem(sk) && copied;

mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
/* old call had no 'copied'; new call propagates it to the subflow */
mptcp_subflow_cleanup_rbuf(ssk);
mptcp_subflow_cleanup_rbuf(ssk, copied);
}
}

Expand Down Expand Up @@ -1939,6 +1940,8 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto out;
}

static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);

static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
size_t len, int flags,
Expand Down Expand Up @@ -1992,6 +1995,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
break;
}

mptcp_rcv_space_adjust(msk, copied);
return copied;
}

Expand Down Expand Up @@ -2217,9 +2221,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,

copied += bytes_read;

/* be sure to advertise window change */
mptcp_cleanup_rbuf(msk);

if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
continue;

Expand Down Expand Up @@ -2268,15 +2269,15 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}

pr_debug("block timeout %ld\n", timeo);
mptcp_rcv_space_adjust(msk, copied);
mptcp_cleanup_rbuf(msk, copied);
err = sk_wait_data(sk, &timeo, NULL);
if (err < 0) {
err = copied ? : err;
goto out_err;
}
}

mptcp_rcv_space_adjust(msk, copied);
mptcp_cleanup_rbuf(msk, copied);

out_err:
if (cmsg_flags && copied >= 0) {
Expand Down

0 comments on commit 3473020

Please sign in to comment.