Merge branch 'tcp-better-receiver-autotuning'
Eric Dumazet says:

====================
tcp: better receiver autotuning

Now that TCP senders no longer back off when a drop is detected,
it appears we are very often receive-window limited.

This series makes tcp_rcv_space_adjust() slightly more robust
and responsive.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Dec 12, 2017
2 parents c360f2b + c3916ad commit 48d79b4
Showing 2 changed files with 13 additions and 20 deletions.
include/linux/tcp.h: 1 addition, 1 deletion
@@ -344,7 +344,7 @@ struct tcp_sock {
 
 /* Receiver queue space */
         struct {
-                int space;
+                u32 space;
                 u32 seq;
                 u64 time;
         } rcvq_space;
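
Note on the type change: rcvq_space.space now matches the u32 "copied" counter used by tcp_rcv_space_adjust(), and the products built from it in net/ipv4/tcp_input.c below are carried in u64, because they can exceed 32 bits with large receive buffers. A minimal userspace sketch of the magnitudes involved (the byte counts are assumed sample values, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assumed sample values: 128 MB copied this RTT, 96 MB the RTT before */
        uint32_t copied = 128u << 20;
        uint32_t space  = 96u << 20;
        uint64_t rcvwin = (uint64_t)copied << 1;        /* about 256 MB */

        /* The proportional-growth product formed in tcp_rcv_space_adjust()
         * before do_div(): roughly 2^28 * 2^25 = 2^53, far beyond 32 bits.
         */
        uint64_t grow = rcvwin * (copied - space);

        printf("rcvwin * delta = %llu (UINT32_MAX = %u)\n",
               (unsigned long long)grow, UINT32_MAX);
        return 0;
}

The kernel code handles the same range by declaring rcvwin and grow as u64 and performing the divisions with do_div().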
net/ipv4/tcp_input.c: 12 additions, 19 deletions
@@ -576,8 +576,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
         struct tcp_sock *tp = tcp_sk(sk);
+        u32 copied;
         int time;
-        int copied;
 
         tcp_mstamp_refresh(tp);
         time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -600,38 +600,31 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
         if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
             !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-                int rcvwin, rcvmem, rcvbuf;
+                int rcvmem, rcvbuf;
+                u64 rcvwin, grow;
 
                 /* minimal window to cope with packet losses, assuming
                  * steady state. Add some cushion because of small variations.
                  */
-                rcvwin = (copied << 1) + 16 * tp->advmss;
+                rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
-                /* If rate increased by 25%,
-                 *      assume slow start, rcvwin = 3 * copied
-                 * If rate increased by 50%,
-                 *      assume sender can use 2x growth, rcvwin = 4 * copied
-                 */
-                if (copied >=
-                    tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
-                        if (copied >=
-                            tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
-                                rcvwin <<= 1;
-                        else
-                                rcvwin += (rcvwin >> 1);
-                }
+                /* Accommodate for sender rate increase (eg. slow start) */
+                grow = rcvwin * (copied - tp->rcvq_space.space);
+                do_div(grow, tp->rcvq_space.space);
+                rcvwin += (grow << 1);
 
                 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
                 while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
                         rcvmem += 128;
 
-                rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-                             sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                do_div(rcvwin, tp->advmss);
+                rcvbuf = min_t(u64, rcvwin * rcvmem,
+                               sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
                 if (rcvbuf > sk->sk_rcvbuf) {
                         sk->sk_rcvbuf = rcvbuf;
 
                         /* Make the window clamp follow along. */
-                        tp->window_clamp = rcvwin;
+                        tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
                 }
         }
         tp->rcvq_space.space = copied;
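
Taken together, the hunk above replaces the old two-step growth rule (+50% of rcvwin at a 25% rate increase, x2 at a 50% increase) with a single proportional rule: rcvwin grows by twice the measured rate increase. A minimal userspace sketch (MSS and byte counts are assumed sample values, not from the patch; do_div() becomes ordinary 64-bit division here) showing that the old thresholds fall out as special cases:

#include <stdint.h>
#include <stdio.h>

static uint64_t new_rcvwin(uint32_t copied, uint32_t prev_space, uint32_t advmss)
{
        /* 2 * copied plus a small cushion, then grown in proportion to the
         * measured rate increase; do_div() in the kernel is plain 64-bit
         * division here.
         */
        uint64_t rcvwin = ((uint64_t)copied << 1) + 16 * advmss;
        uint64_t grow = rcvwin * (copied - prev_space) / prev_space;

        return rcvwin + (grow << 1);
}

int main(void)
{
        uint32_t advmss = 1460;
        uint32_t prev = 4u << 20;       /* 4 MB copied over the previous RTT */

        /* +25% rate: ends up near 3 * copied, the old "slow start" step */
        printf("+25%% rate: rcvwin = %llu\n",
               (unsigned long long)new_rcvwin(prev + prev / 4, prev, advmss));
        /* +50% rate: ends up near 4 * copied, the old "2x growth" step */
        printf("+50%% rate: rcvwin = %llu\n",
               (unsigned long long)new_rcvwin(prev + prev / 2, prev, advmss));
        /* +10% rate: a proportional step the old code would not have taken */
        printf("+10%% rate: rcvwin = %llu\n",
               (unsigned long long)new_rcvwin(prev + prev / 10, prev, advmss));
        return 0;
}

At +25% the window settles near 3 * copied and at +50% near 4 * copied, matching the comment deleted from the old code, while intermediate rate increases now get a proportional step instead of none.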
