[TCP]: add tcp_slow_start helper
Move all the code that does linear TCP slow start into one
inline function to ease a later patch adding ABC (Appropriate
Byte Counting) support.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stephen Hemminger authored and David S. Miller committed Nov 11, 2005
1 parent 2d2abba commit 7faffa1
Showing 7 changed files with 43 additions and 59 deletions.
10 changes: 10 additions & 0 deletions include/net/tcp.h
@@ -765,6 +765,16 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 			    (tp->snd_cwnd >> 2)));
 }
 
+/*
+ * Linear increase during slow start
+ */
+static inline void tcp_slow_start(struct tcp_sock *tp)
+{
+	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+		tp->snd_cwnd++;
+}
+
+
 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
 	if (tp->rx_opt.sack_ok &&
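For orientation, a minimal sketch (not part of the commit) of how a congestion-control module's cong_avoid hook would use the new helper; the name example_cong_avoid is hypothetical and the congestion-avoidance branch is elided:

/* Hypothetical cong_avoid hook -- illustrative only, not kernel code.
 * During slow start it defers to the shared helper; otherwise the
 * module runs its own additive-increase logic.
 */
static void example_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
			       u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);	/* one segment per ACK, clamped */
	else {
		/* module-specific congestion avoidance goes here */
	}
}

Centralizing the increase also gives the planned ABC patch a single place to decide how many segments each ACK is worth during slow start.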
10 changes: 4 additions & 6 deletions net/ipv4/tcp_bic.c
@@ -220,14 +220,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* In "safe" area, increase. */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
 		bictcp_update(ca, tp->snd_cwnd);
 
-		/* In dangerous area, increase slowly.
+		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
 		 */
 		if (tp->snd_cwnd_cnt >= ca->cnt) {
11 changes: 5 additions & 6 deletions net/ipv4/tcp_cong.c
@@ -189,12 +189,11 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* In "safe" area, increase. */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
-		/* In dangerous area, increase slowly.
+	/* In "safe" area, increase. */
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
+		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
 		 */
 		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
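The comment preserved above -- "In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd" -- describes a fractional add emulated with an integer counter: Reno compares snd_cwnd_cnt against snd_cwnd itself (BIC, above, compares against the ca->cnt computed by bictcp_update), so the window gains one full segment per window's worth of ACKs, roughly once per RTT. A standalone model of that arithmetic, with plain integers standing in for the socket fields:

#include <stdio.h>

/* Model of Reno congestion avoidance: each ACK bumps a counter;
 * once the counter reaches cwnd, cwnd grows by one segment and
 * the counter resets -- i.e. cwnd += 1/cwnd per ACK.
 */
int main(void)
{
	unsigned int cwnd = 10, cnt = 0, acks;

	for (acks = 0; acks < 100; acks++) {
		if (cnt >= cwnd) {	/* a full window of ACKs seen */
			cwnd++;		/* grow by one whole segment */
			cnt = 0;
		} else
			cnt++;
	}
	printf("cwnd after 100 ACKs: %u\n", cwnd);	/* prints 17 */
	return 0;
}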
7 changes: 3 additions & 4 deletions net/ipv4/tcp_highspeed.c
@@ -119,10 +119,9 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
 		/* Update AIMD parameters */
 		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
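HighSpeed TCP differs only after slow start: its AIMD parameters come from a precomputed table, hstcp_aimd_vals, and the while loop above walks the index ca->ai forward while cwnd exceeds the current row's threshold. A toy model of that walk; the row values here are illustrative, not the real table, which is much longer:

#include <stdio.h>

/* Toy stand-in for the hstcp_aimd_vals walk: advance the index
 * while cwnd exceeds the row's cwnd threshold. Row contents are
 * made up for illustration.
 */
struct aimd_row { unsigned int cwnd; unsigned int ai; };

static const struct aimd_row rows[] = {
	{  38, 1 },
	{ 118, 2 },
	{ 221, 3 },
	{ 347, 4 },
};

int main(void)
{
	unsigned int cwnd = 150, ai = 0;

	while (ai < 3 && cwnd > rows[ai].cwnd)
		ai++;
	printf("cwnd %u -> row %u (additive increase %u)\n",
	       cwnd, ai, rows[ai].ai);
	return 0;
}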
11 changes: 5 additions & 6 deletions net/ipv4/tcp_htcp.c
@@ -210,11 +210,10 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* In "safe" area, increase. */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
+
 		measure_rtt(sk);
 
 		/* keep track of number of round-trip times since last backoff event */
@@ -224,7 +223,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 			htcp_alpha_update(ca);
 		}
 
-		/* In dangerous area, increase slowly.
+		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
 		 */
 		if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) {
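H-TCP generalizes the same counter with a fixed-point factor: ca->alpha is kept in units of 1/128 (hence the >>7), so the test (snd_cwnd_cnt++ * alpha) >> 7 >= snd_cwnd fires about alpha/128 times per window of ACKs, growing the window by roughly alpha/128 segments per RTT. A standalone model under those assumptions:

#include <stdio.h>

/* Model of H-TCP's scaled counter. With alpha = 256 (2.0 in
 * 1/128ths), the window should grow about twice as fast as
 * Reno's one segment per RTT.
 */
int main(void)
{
	unsigned int cwnd = 10, cnt = 0, alpha = 256, acks;

	for (acks = 0; acks < 100; acks++) {
		if ((cnt++ * alpha) >> 7 >= cwnd) {
			cwnd++;
			cnt = 0;
		}
	}
	printf("cwnd after 100 ACKs: %u\n", cwnd);	/* prints 21 */
	return 0;
}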
11 changes: 5 additions & 6 deletions net/ipv4/tcp_scalable.c
@@ -24,17 +24,16 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
 		tp->snd_cwnd_cnt++;
 		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
-			tp->snd_cwnd++;
+			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+				tp->snd_cwnd++;
 			tp->snd_cwnd_cnt = 0;
 		}
 	}
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
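Scalable TCP's twist is the min() cap: once cwnd exceeds TCP_SCALABLE_AI_CNT (50 in this era's tcp_scalable.c), one segment is added per 50 ACKs regardless of window size, so per-RTT growth is about cwnd/50, i.e. ~2% multiplicative instead of Reno's additive step. A standalone model assuming that constant:

#include <stdio.h>

#define TCP_SCALABLE_AI_CNT 50U	/* value assumed from tcp_scalable.c */

/* Model of Scalable TCP's increase: one segment per
 * min(cwnd, 50) ACKs, i.e. ~cwnd/50 segments per RTT once
 * cwnd > 50 -- roughly 2% multiplicative growth per RTT.
 */
static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int cwnd = 100, cnt = 0, acks;

	for (acks = 0; acks < 1000; acks++) {
		cnt++;
		if (cnt > min_u(cwnd, TCP_SCALABLE_AI_CNT)) {
			cwnd++;
			cnt = 0;
		}
	}
	printf("cwnd after 1000 ACKs: %u\n", cwnd);	/* prints 119 */
	return 0;
}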
42 changes: 11 additions & 31 deletions net/ipv4/tcp_vegas.c
@@ -236,8 +236,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 		/* We don't have enough RTT samples to do the Vegas
 		 * calculation, so we'll behave like Reno.
 		 */
-		if (tp->snd_cwnd > tp->snd_ssthresh)
-			tp->snd_cwnd++;
+		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, cnt);
 	} else {
 		u32 rtt, target_cwnd, diff;
 
@@ -275,7 +274,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 		 */
 		diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
 
-		if (tp->snd_cwnd < tp->snd_ssthresh) {
+		if (tp->snd_cwnd <= tp->snd_ssthresh) {
 			/* Slow start.  */
 			if (diff > gamma) {
 				/* Going too fast. Time to slow down
@@ -295,6 +294,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 							V_PARAM_SHIFT)+1);
 
 			}
+			tcp_slow_start(tp);
 		} else {
 			/* Congestion avoidance. */
 			u32 next_snd_cwnd;
@@ -327,37 +327,17 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 			else if (next_snd_cwnd < tp->snd_cwnd)
 				tp->snd_cwnd--;
 		}
-	}
 
-	/* Wipe the slate clean for the next RTT. */
-	vegas->cntRTT = 0;
-	vegas->minRTT = 0x7fffffff;
+		if (tp->snd_cwnd < 2)
+			tp->snd_cwnd = 2;
+		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+			tp->snd_cwnd = tp->snd_cwnd_clamp;
+	}
 
-	/* The following code is executed for every ack we receive,
-	 * except for conditions checked in should_advance_cwnd()
-	 * before the call to tcp_cong_avoid(). Mainly this means that
-	 * we only execute this code if the ack actually acked some
-	 * data.
-	 */
-
-	/* If we are in slow start, increase our cwnd in response to this ACK.
-	 * (If we are not in slow start then we are in congestion avoidance,
-	 * and adjust our congestion window only once per RTT. See the code
-	 * above.)
-	 */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tp->snd_cwnd++;
-
-	/* to keep cwnd from growing without bound */
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-
-	/* Make sure that we are never so timid as to reduce our cwnd below
-	 * 2 MSS.
-	 *
-	 * Going below 2 MSS would risk huge delayed ACKs from our receiver.
-	 */
-	tp->snd_cwnd = max(tp->snd_cwnd, 2U);
+	/* Wipe the slate clean for the next RTT. */
+	vegas->cntRTT = 0;
+	vegas->minRTT = 0x7fffffff;
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
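The net effect in tcp_vegas.c: the hand-rolled Reno imitation is gone (the low-sample fallback now calls tcp_reno_cong_avoid directly, picking up the clamped tcp_slow_start for free), slow start goes through the shared helper after the gamma check, and the cwnd bounds move inside the Vegas branch. A minimal sketch of the resulting control flow; field and helper names follow this era's tcp_vegas.c, and the Vegas computation is abridged:

/* Abridged control flow of tcp_vegas_cong_avoid after this patch;
 * the expected-vs-actual throughput comparison is elided.
 */
static void vegas_cong_avoid_sketch(struct sock *sk, u32 ack, u32 seq_rtt,
				    u32 in_flight, int cnt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct vegas *vegas = inet_csk_ca(sk);

	if (vegas->cntRTT <= 2) {
		/* Too few RTT samples: behave like Reno, which now
		 * includes the clamped tcp_slow_start().
		 */
		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, cnt);
	} else {
		/* ... compare expected vs actual throughput ... */
		if (tp->snd_cwnd <= tp->snd_ssthresh)
			tcp_slow_start(tp);	/* after the gamma check */
		/* else: once-per-RTT Vegas adjustment, then bound
		 * cwnd to [2, tp->snd_cwnd_clamp] */
	}

	/* Wipe the slate clean for the next RTT. */
	vegas->cntRTT = 0;
	vegas->minRTT = 0x7fffffff;
}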
