Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 88360
b: refs/heads/master
c: b000cd3
h: refs/heads/master
v: v3
  • Loading branch information
Vitaliy Gusev authored and David S. Miller committed Apr 15, 2008
1 parent 604ff5b commit c0e44cd
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 066a3b5b2346febf9a655b444567b7138e3bb939
refs/heads/master: b000cd3707e7b25d76745f9c0e261c23d21fa578
72 changes: 46 additions & 26 deletions trunk/net/ipv4/tcp_input.c
Original file line number Diff line number Diff line change
Expand Up @@ -3841,8 +3841,26 @@ static void tcp_ofo_queue(struct sock *sk)
}
}

static void tcp_prune_ofo_queue(struct sock *sk);
static int tcp_prune_queue(struct sock *sk);

/* Try to make room to charge @size bytes against sk's receive buffer.
 *
 * Fast path: buffer under its limit and the charge is accepted.
 * Slow path: prune the receive queue first, and as a last resort
 * drop the whole out-of-order queue, retrying the charge after each.
 *
 * Returns 0 when the memory could be scheduled, -1 otherwise.
 */
static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
{
	/* Under the rcvbuf limit and the charge fits: nothing to do. */
	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    sk_rmem_schedule(sk, size))
		return 0;

	/* Over limit (or charge refused): reclaim from the queues. */
	if (tcp_prune_queue(sk) < 0)
		return -1;

	if (sk_rmem_schedule(sk, size))
		return 0;

	/* Still no room: sacrifice the out-of-order queue and retry. */
	tcp_prune_ofo_queue(sk);
	if (!sk_rmem_schedule(sk, size))
		return -1;

	return 0;
}

static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
Expand Down Expand Up @@ -3892,12 +3910,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (eaten <= 0) {
queue_and_out:
if (eaten < 0 &&
(atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, skb->truesize))) {
if (tcp_prune_queue(sk) < 0 ||
!sk_rmem_schedule(sk, skb->truesize))
goto drop;
}
tcp_try_rmem_schedule(sk, skb->truesize))
goto drop;

skb_set_owner_r(skb, sk);
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
Expand Down Expand Up @@ -3966,12 +3981,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)

TCP_ECN_check_ce(tp, skb);

if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, skb->truesize)) {
if (tcp_prune_queue(sk) < 0 ||
!sk_rmem_schedule(sk, skb->truesize))
goto drop;
}
if (tcp_try_rmem_schedule(sk, skb->truesize))
goto drop;

/* Disable header prediction. */
tp->pred_flags = 0;
Expand Down Expand Up @@ -4198,6 +4209,28 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
}
}

/*
 * Purge the out-of-order queue.
 */
static void tcp_prune_ofo_queue(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue))
		return;

	NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Reset SACK state. A conforming SACK implementation will
	 * do the same at a timeout based retransmit. When a connection
	 * is in a sad state like this, we care only about integrity
	 * of the connection not performance.
	 *
	 * Use tcp_is_sack(), as the code this was factored out of did,
	 * instead of testing rx_opt.sack_ok directly -- sack_ok is a
	 * flags field, so the raw test is presumably wider; keep the
	 * original predicate.
	 */
	if (tcp_is_sack(tp))
		tcp_sack_reset(&tp->rx_opt);
	sk_mem_reclaim(sk);
}

/* Reduce allocated memory if we can, trying to get
* the socket within its memory limits again.
*
Expand Down Expand Up @@ -4231,20 +4264,7 @@ static int tcp_prune_queue(struct sock *sk)
/* Collapsing did not help, destructive actions follow.
* This must not ever occur. */

/* First, purge the out_of_order queue. */
if (!skb_queue_empty(&tp->out_of_order_queue)) {
NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
__skb_queue_purge(&tp->out_of_order_queue);

/* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection
* is in a sad state like this, we care only about integrity
* of the connection not performance.
*/
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
}
tcp_prune_ofo_queue(sk);

if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
Expand Down

0 comments on commit c0e44cd

Please sign in to comment.