Commit 12de798
---
r: 88255
b: refs/heads/master
c: 882beba
h: refs/heads/master
i:
  88253: 72acfa3
  88251: a11c6c6
  88247: 208c7a5
  88239: 74ee94e
  88223: 4e3fc94
  88191: 4dc3125
v: v3
Ilpo Järvinen authored and David S. Miller committed Apr 8, 2008
1 parent 1adc1c1 commit 12de798
Showing 4 changed files with 24 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c137f3dda04b0aee1bc6889cdc69185f53df8a82
+refs/heads/master: 882bebaaca4bb1484078d44ef011f918c0e1e14e
2 changes: 2 additions & 0 deletions trunk/include/net/tcp.h
@@ -752,6 +752,8 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
         return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 }
 
+extern int tcp_limit_reno_sacked(struct tcp_sock *tp);
+
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
24 changes: 18 additions & 6 deletions trunk/net/ipv4/tcp_input.c
@@ -1625,22 +1625,32 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
         return flag;
 }
 
-/* If we receive more dupacks than we expected counting segments
- * in assumption of absent reordering, interpret this as reordering.
- * The only another reason could be bug in receiver TCP.
+/* Limits sacked_out so that sum with lost_out isn't ever larger than
+ * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
  */
-static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+int tcp_limit_reno_sacked(struct tcp_sock *tp)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
         u32 holes;
 
         holes = max(tp->lost_out, 1U);
         holes = min(holes, tp->packets_out);
 
         if ((tp->sacked_out + holes) > tp->packets_out) {
                 tp->sacked_out = tp->packets_out - holes;
-                tcp_update_reordering(sk, tp->packets_out + addend, 0);
+                return 1;
         }
+        return 0;
+}
+
+/* If we receive more dupacks than we expected counting segments
+ * in assumption of absent reordering, interpret this as reordering.
+ * The only another reason could be bug in receiver TCP.
+ */
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        if (tcp_limit_reno_sacked(tp))
+                tcp_update_reordering(sk, tp->packets_out + addend, 0);
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
@@ -2600,6 +2610,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
         case TCP_CA_Loss:
                 if (flag & FLAG_DATA_ACKED)
                         icsk->icsk_retransmits = 0;
+                if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
+                        tcp_reset_reno_sack(tp);
                 if (!tcp_try_undo_loss(sk)) {
                         tcp_moderate_cwnd(tp);
                         tcp_xmit_retransmit_queue(sk);
3 changes: 3 additions & 0 deletions trunk/net/ipv4/tcp_output.c
@@ -1808,6 +1808,9 @@ void tcp_simple_retransmit(struct sock *sk)
         if (!lost)
                 return;
 
+        if (tcp_is_reno(tp))
+                tcp_limit_reno_sacked(tp);
+
         tcp_verify_left_out(tp);
 
         /* Don't muck with the congestion window here.
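For readers tracing the change: tcp_limit_reno_sacked() enforces the NewReno invariant that sacked_out + lost_out never exceeds packets_out, clamping the dupack-derived sacked_out so callers such as tcp_check_reno_reordering() can treat the excess as reordering, and so the counters stay consistent before the tcp_verify_left_out() call in tcp_simple_retransmit(). Below is a minimal user-space sketch of that clamping rule only; struct reno_state, limit_reno_sacked(), and the numbers in main() are illustrative stand-ins, not the kernel code.

#include <stdio.h>

/* Hypothetical stand-in for the three tcp_sock counters the helper touches. */
struct reno_state {
        unsigned int packets_out;       /* segments currently in flight */
        unsigned int lost_out;          /* segments presumed lost */
        unsigned int sacked_out;        /* dupack-derived count (NewReno SACK emulation) */
};

/* Clamp sacked_out so that sacked_out + lost_out <= packets_out,
 * always reserving at least one hole. Returns 1 if an adjustment was made. */
static int limit_reno_sacked(struct reno_state *tp)
{
        unsigned int holes = tp->lost_out > 1 ? tp->lost_out : 1;

        if (holes > tp->packets_out)
                holes = tp->packets_out;

        if (tp->sacked_out + holes > tp->packets_out) {
                tp->sacked_out = tp->packets_out - holes;
                return 1;
        }
        return 0;
}

int main(void)
{
        /* Example: 10 packets in flight, 3 marked lost, 9 dupacks counted. */
        struct reno_state tp = { .packets_out = 10, .lost_out = 3, .sacked_out = 9 };

        int adjusted = limit_reno_sacked(&tp);

        printf("adjusted=%d sacked_out=%u\n", adjusted, tp.sacked_out); /* adjusted=1 sacked_out=7 */
        return 0;
}

The max(tp->lost_out, 1U) term in the kernel helper (mirrored above) reserves at least one hole, so sacked_out is never allowed to cover every packet in flight even when nothing is marked lost.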
