Commit

---
yaml
---
r: 368127
b: refs/heads/master
c: b2fb4f5
h: refs/heads/master
i:
  368125: b5803e0
  368123: 0a87ced
  368119: 8db7421
  368111: 68909b5
  368095: b27e247
  368063: 6829336
  367999: 797b2ab
  367871: 32a6862
  367615: 2f1afb9
v: v3
Eric Dumazet authored and David S. Miller committed Mar 7, 2013
1 parent 7fc3423 commit ce02e51
Showing 3 changed files with 46 additions and 45 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f3564b2bb5f86f42b8a068751551b6bd01325d9c
+refs/heads/master: b2fb4f54ecd47c42413d54b4666b06cf93c05abf
45 changes: 1 addition & 44 deletions trunk/include/net/tcp.h
@@ -1030,50 +1030,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 #endif
 }
 
-/* Packet is added to VJ-style prequeue for processing in process
- * context, if a reader task is waiting. Apparently, this exciting
- * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
- * failed somewhere. Latency? Burstiness? Well, at least now we will
- * see, why it failed. 8)8) --ANK
- *
- * NOTE: is this not too big to inline?
- */
-static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return false;
-
-	if (skb->len <= tcp_hdrlen(skb) &&
-	    skb_queue_len(&tp->ucopy.prequeue) == 0)
-		return false;
-
-	__skb_queue_tail(&tp->ucopy.prequeue, skb);
-	tp->ucopy.memory += skb->truesize;
-	if (tp->ucopy.memory > sk->sk_rcvbuf) {
-		struct sk_buff *skb1;
-
-		BUG_ON(sock_owned_by_user(sk));
-
-		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-			sk_backlog_rcv(sk, skb1);
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPPREQUEUEDROPPED);
-		}
-
-		tp->ucopy.memory = 0;
-	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-		wake_up_interruptible_sync_poll(sk_sleep(sk),
-					   POLLIN | POLLRDNORM | POLLRDBAND);
-		if (!inet_csk_ack_scheduled(sk))
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-						  (3 * tcp_rto_min(sk)) / 4,
-						  TCP_RTO_MAX);
-	}
-	return true;
-}
-
+extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE

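What the header change amounts to: the ~40-line static inline body becomes a one-line extern declaration, so every call site now shares the single out-of-line definition added to tcp_ipv4.c below, instead of each translation unit expanding its own copy. A minimal sketch of the same header-to-source move in plain C, using hypothetical widget.h / widget.c names rather than anything from the kernel tree:

/* widget.h -- after the move: declaration only. Before, a static
 * inline definition here was instantiated separately in every
 * translation unit that called it. */
#ifndef WIDGET_H
#define WIDGET_H

#include <stdbool.h>

extern bool widget_enqueue(int item);

#endif /* WIDGET_H */

/* widget.c -- the single shared definition */
#include "widget.h"

bool widget_enqueue(int item)
{
	return item >= 0;	/* stand-in for the real queueing logic */
}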
44 changes: 44 additions & 0 deletions trunk/net/ipv4/tcp_ipv4.c
@@ -1950,6 +1950,50 @@ void tcp_v4_early_demux(struct sk_buff *skb)
 	}
 }
 
+/* Packet is added to VJ-style prequeue for processing in process
+ * context, if a reader task is waiting. Apparently, this exciting
+ * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
+ * failed somewhere. Latency? Burstiness? Well, at least now we will
+ * see, why it failed. 8)8) --ANK
+ *
+ */
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (sysctl_tcp_low_latency || !tp->ucopy.task)
+		return false;
+
+	if (skb->len <= tcp_hdrlen(skb) &&
+	    skb_queue_len(&tp->ucopy.prequeue) == 0)
+		return false;
+
+	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+	tp->ucopy.memory += skb->truesize;
+	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+		struct sk_buff *skb1;
+
+		BUG_ON(sock_owned_by_user(sk));
+
+		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+			sk_backlog_rcv(sk, skb1);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPPREQUEUEDROPPED);
+		}
+
+		tp->ucopy.memory = 0;
+	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+		wake_up_interruptible_sync_poll(sk_sleep(sk),
+					   POLLIN | POLLRDNORM | POLLRDBAND);
+		if (!inet_csk_ack_scheduled(sk))
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+						  (3 * tcp_rto_min(sk)) / 4,
+						  TCP_RTO_MAX);
+	}
+	return true;
+}
+EXPORT_SYMBOL(tcp_prequeue);
+
 /*
  * From tcp_input.c
  */
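Two details of the moved function are worth spelling out. On overflow, once tp->ucopy.memory exceeds sk->sk_rcvbuf, every queued skb is drained through sk_backlog_rcv() and accounted under LINUX_MIB_TCPPREQUEUEDROPPED. On the first packet queued to an empty prequeue, the reader is woken and, if no ACK is already scheduled, a delayed-ACK timer is armed at (3 * tcp_rto_min(sk)) / 4. A standalone userspace sketch of that arithmetic, assuming the default TCP_RTO_MIN of HZ/5 (200 ms) and no per-route rto_min override:

/* Userspace sketch, not kernel code: reproduces the delayed-ACK
 * timeout computed in tcp_prequeue() above, assuming tcp_rto_min()
 * returns the default 200 ms. */
#include <stdio.h>

int main(void)
{
	unsigned int rto_min_ms = 200;	/* assumed default TCP_RTO_MIN */
	unsigned int delack_ms = (3 * rto_min_ms) / 4;

	printf("delayed-ACK timer: %u ms\n", delack_ms);	/* prints 150 ms */
	return 0;
}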
