diff --git a/[refs] b/[refs]
index a48fc0842297..91cb5805b0b2 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 56079431b6ba163df8ba26b3eccc82379f0c0ce4
+refs/heads/master: c08e49611a8b4e38a75bf217e1029a48faf10b82
diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c
index a96ea7dd0fc1..ed2afdb9ea2d 100644
--- a/trunk/net/core/sock.c
+++ b/trunk/net/core/sock.c
@@ -385,7 +385,21 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 			val = sysctl_rmem_max;
 set_rcvbuf:
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-		/* FIXME: is this lower bound the right one? */
+		/*
+		 * We double it on the way in to account for
+		 * "struct sk_buff" etc. overhead.  Applications
+		 * assume that the SO_RCVBUF setting they make will
+		 * allow that much actual data to be received on that
+		 * socket.
+		 *
+		 * Applications are unaware that "struct sk_buff" and
+		 * other overheads allocate from the receive buffer
+		 * during socket buffer allocation.
+		 *
+		 * And after considering the possible alternatives,
+		 * returning the value we actually used in getsockopt
+		 * is the most desirable behavior.
+		 */
 		if ((val * 2) < SOCK_MIN_RCVBUF)
 			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
 		else
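
For context, not part of the patch itself: the new comment says the kernel stores
val * 2 in sk->sk_rcvbuf and that the doubled value is what getsockopt() reports
back. A minimal userspace sketch of that observable behavior follows; the program,
its variable names, and the chosen buffer size are illustrative assumptions, not
taken from the patch.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative value; the kernel clamps the stored result
	 * between SOCK_MIN_RCVBUF and the net.core.rmem_max sysctl. */
	int requested = 65536;
	int reported = 0;
	socklen_t len = sizeof(reported);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
		       &requested, sizeof(requested)) < 0) {
		perror("setsockopt");
		close(fd);
		return 1;
	}
	/* Per the comment added in sock_setsockopt(), this should
	 * report roughly requested * 2: the doubled value stored in
	 * sk->sk_rcvbuf is returned rather than the value passed in. */
	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &reported, &len) < 0) {
		perror("getsockopt");
		close(fd);
		return 1;
	}
	printf("requested %d, getsockopt reports %d\n", requested, reported);
	close(fd);
	return 0;
}

The doubling accounts for the "struct sk_buff" overhead described in the comment,
so an application that asks for N bytes of receive buffer can actually receive
about N bytes of payload.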