tcp: uninline tcp_prequeue()
tcp_prequeue() became too big to be inlined.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet authored and David S. Miller committed Mar 7, 2013
1 parent f3564b2 commit b2fb4f5
Showing 2 changed files with 45 additions and 44 deletions.
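
The change follows the usual pattern for uninlining a helper that has outgrown its header: the static inline definition in include/net/tcp.h becomes an extern declaration, and the body moves into net/ipv4/tcp_ipv4.c together with an EXPORT_SYMBOL, presumably so that modular callers such as the IPv6 TCP code can still reach it. A minimal, self-contained sketch of the same pattern, using hypothetical names rather than the kernel code:

/* foo.h -- before: every translation unit compiles its own copy of the body */
static inline int foo_sum(int a, int b)
{
        return a + b;
}

/* foo.h -- after: only the declaration remains in the header */
extern int foo_sum(int a, int b);

/* foo.c -- after: a single out-of-line definition shared by all callers */
int foo_sum(int a, int b)
{
        return a + b;
}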
45 changes: 1 addition & 44 deletions include/net/tcp.h
@@ -1030,50 +1030,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
* context, if a reader task is waiting. Apparently, this exciting
* idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
* failed somewhere. Latency? Burstiness? Well, at least now we will
* see, why it failed. 8)8) --ANK
*
* NOTE: is this not too big to inline?
*/
static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (sysctl_tcp_low_latency || !tp->ucopy.task)
                return false;

        if (skb->len <= tcp_hdrlen(skb) &&
            skb_queue_len(&tp->ucopy.prequeue) == 0)
                return false;

        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
        if (tp->ucopy.memory > sk->sk_rcvbuf) {
                struct sk_buff *skb1;

                BUG_ON(sock_owned_by_user(sk));

                while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
                        sk_backlog_rcv(sk, skb1);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPPREQUEUEDROPPED);
                }

                tp->ucopy.memory = 0;
        } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
                wake_up_interruptible_sync_poll(sk_sleep(sk),
                                           POLLIN | POLLRDNORM | POLLRDBAND);
                if (!inet_csk_ack_scheduled(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  (3 * tcp_rto_min(sk)) / 4,
                                                  TCP_RTO_MAX);
        }
        return true;
}

extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

44 changes: 44 additions & 0 deletions net/ipv4/tcp_ipv4.c
@@ -1950,6 +1950,50 @@ void tcp_v4_early_demux(struct sk_buff *skb)
}
}

/* Packet is added to VJ-style prequeue for processing in process
* context, if a reader task is waiting. Apparently, this exciting
* idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
* failed somewhere. Latency? Burstiness? Well, at least now we will
* see, why it failed. 8)8) --ANK
*
*/
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (sysctl_tcp_low_latency || !tp->ucopy.task)
                return false;

        if (skb->len <= tcp_hdrlen(skb) &&
            skb_queue_len(&tp->ucopy.prequeue) == 0)
                return false;

        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
        if (tp->ucopy.memory > sk->sk_rcvbuf) {
                struct sk_buff *skb1;

                BUG_ON(sock_owned_by_user(sk));

                while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
                        sk_backlog_rcv(sk, skb1);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPPREQUEUEDROPPED);
                }

                tp->ucopy.memory = 0;
        } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
                wake_up_interruptible_sync_poll(sk_sleep(sk),
                                           POLLIN | POLLRDNORM | POLLRDBAND);
                if (!inet_csk_ack_scheduled(sk))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  (3 * tcp_rto_min(sk)) / 4,
                                                  TCP_RTO_MAX);
        }
        return true;
}
EXPORT_SYMBOL(tcp_prequeue);

/*
* From tcp_input.c
*/
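
For context on how this function is consumed, the sketch below shows the approximate shape of the call site in tcp_v4_rcv() for kernels of this era; it is a simplified illustration (socket lookup, filtering and backlog handling omitted), not the verbatim source:

        bh_lock_sock_nested(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
                /* No user task owns the socket: try to queue the segment on
                 * the prequeue for a waiting reader; if tcp_prequeue()
                 * declines, process it right away in softirq context.
                 */
                if (!tcp_prequeue(sk, skb))
                        ret = tcp_v4_do_rcv(sk, skb);
        }
        bh_unlock_sock(sk);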