Commit 9230c48

---
yaml
---
r: 66254
b: refs/heads/master
c: 4ddf667
h: refs/heads/master
v: v3
Ilpo Järvinen authored and David S. Miller committed Oct 10, 2007
1 parent 7af4e66 commit 9230c48
Showing 2 changed files with 48 additions and 52 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d06e021d71d95aae402340dc3d9f79f9c8ad11d7
+refs/heads/master: 4ddf66769d2df868071420e2e0106746c6204ea3
98 changes: 47 additions & 51 deletions trunk/net/ipv4/tcp_input.c
@@ -1314,6 +1314,53 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 /* F-RTO can only be used if TCP has never retransmitted anything other than
  * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        u32 holes;
+
+        holes = max(tp->lost_out, 1U);
+        holes = min(holes, tp->packets_out);
+
+        if ((tp->sacked_out + holes) > tp->packets_out) {
+                tp->sacked_out = tp->packets_out - holes;
+                tcp_update_reordering(sk, tp->packets_out + addend, 0);
+        }
+}
+
+/* Emulate SACKs for SACKless connection: account for a new dupack. */
+
+static void tcp_add_reno_sack(struct sock *sk)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        tp->sacked_out++;
+        tcp_check_reno_reordering(sk, 0);
+        tcp_sync_left_out(tp);
+}
+
+/* Account for ACK, ACKing some data in Reno Recovery phase. */
+
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+
+        if (acked > 0) {
+                /* One ACK acked hole. The rest eat duplicate ACKs. */
+                if (acked-1 >= tp->sacked_out)
+                        tp->sacked_out = 0;
+                else
+                        tp->sacked_out -= acked-1;
+        }
+        tcp_check_reno_reordering(sk, acked);
+        tcp_sync_left_out(tp);
+}
+
+static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
+{
+        tp->sacked_out = 0;
+        tp->left_out = tp->lost_out;
+}
+
 int tcp_use_frto(struct sock *sk)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
@@ -1730,57 +1777,6 @@ static int tcp_time_to_recover(struct sock *sk)
         return 0;
 }
 
-/* If we receive more dupacks than we expected counting segments
- * in assumption of absent reordering, interpret this as reordering.
- * The only another reason could be bug in receiver TCP.
- */
-static void tcp_check_reno_reordering(struct sock *sk, const int addend)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        u32 holes;
-
-        holes = max(tp->lost_out, 1U);
-        holes = min(holes, tp->packets_out);
-
-        if ((tp->sacked_out + holes) > tp->packets_out) {
-                tp->sacked_out = tp->packets_out - holes;
-                tcp_update_reordering(sk, tp->packets_out + addend, 0);
-        }
-}
-
-/* Emulate SACKs for SACKless connection: account for a new dupack. */
-
-static void tcp_add_reno_sack(struct sock *sk)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        tp->sacked_out++;
-        tcp_check_reno_reordering(sk, 0);
-        tcp_sync_left_out(tp);
-}
-
-/* Account for ACK, ACKing some data in Reno Recovery phase. */
-
-static void tcp_remove_reno_sacks(struct sock *sk, int acked)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-
-        if (acked > 0) {
-                /* One ACK acked hole. The rest eat duplicate ACKs. */
-                if (acked-1 >= tp->sacked_out)
-                        tp->sacked_out = 0;
-                else
-                        tp->sacked_out -= acked-1;
-        }
-        tcp_check_reno_reordering(sk, acked);
-        tcp_sync_left_out(tp);
-}
-
-static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
-{
-        tp->sacked_out = 0;
-        tp->left_out = tp->lost_out;
-}
-
 /* RFC: This is from the original, I doubt that this is necessary at all:
  * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
  * retransmitted past LOST markings in the first place? I'm not fully sure
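The hunks above relocate the Reno (SACK-less) accounting helpers, tcp_check_reno_reordering(), tcp_add_reno_sack(), tcp_remove_reno_sacks() and tcp_reset_reno_sack(), to an earlier position in tcp_input.c with their logic unchanged, presumably so that code earlier in the file can call them without forward declarations. As a rough illustration of the dupack accounting they implement, the following is a minimal user-space sketch, not kernel code: struct tcp_counters, update_reordering_stub() and main() are hypothetical simplifications, and only the capping arithmetic mirrors tcp_check_reno_reordering() and tcp_add_reno_sack() from the diff.

/*
 * Minimal user-space sketch of the Reno dupack accounting moved by this
 * commit.  Everything here is a simplified stand-in for illustration:
 * struct tcp_counters, update_reordering_stub() and main() are hypothetical;
 * only the capping arithmetic follows the kernel functions in the diff.
 */
#include <stdio.h>

struct tcp_counters {
        unsigned int packets_out;       /* segments currently in flight */
        unsigned int sacked_out;        /* dupacks counted as "SACKed" */
        unsigned int lost_out;          /* segments assumed lost */
};

/* Stub standing in for tcp_update_reordering(). */
static void update_reordering_stub(unsigned int metric)
{
        printf("reordering detected, metric %u\n", metric);
}

/* Mirrors tcp_check_reno_reordering(): never let sacked_out exceed
 * packets_out minus the assumed number of holes. */
static void check_reno_reordering(struct tcp_counters *tp, int addend)
{
        unsigned int holes = tp->lost_out > 1U ? tp->lost_out : 1U;

        if (holes > tp->packets_out)
                holes = tp->packets_out;

        if (tp->sacked_out + holes > tp->packets_out) {
                tp->sacked_out = tp->packets_out - holes;
                update_reordering_stub(tp->packets_out + addend);
        }
}

/* Mirrors tcp_add_reno_sack(): one duplicate ACK counts as one SACKed segment. */
static void add_reno_sack(struct tcp_counters *tp)
{
        tp->sacked_out++;
        check_reno_reordering(tp, 0);
}

int main(void)
{
        struct tcp_counters tp = { .packets_out = 5, .sacked_out = 0, .lost_out = 0 };
        int i;

        /* With 5 segments in flight and no losses, holes clamps to 1, so the
         * 5th dupack overshoots packets_out - holes and triggers the cap. */
        for (i = 0; i < 5; i++) {
                add_reno_sack(&tp);
                printf("dupack %d -> sacked_out = %u\n", i + 1, tp.sacked_out);
        }
        return 0;
}

In this toy run the first four duplicate ACKs simply increment sacked_out; the fifth would push it past packets_out - holes, so it is reported as reordering and sacked_out is capped at 4 instead. Capping sacked_out this way keeps the SACK-emulation counters from exceeding the number of segments actually in flight, which the tcp_sync_left_out() bookkeeping in the real code appears to rely on.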
