Merge branch 'tcp-cong-undo_cwnd-mandatory'
Florian Westphal says:

====================
tcp: make undo_cwnd mandatory for congestion modules

highspeed, illinois, scalable, veno and yeah congestion control algorithms
don't provide a 'cwnd_undo' function.  This makes the stack fall back to the
'reno undo', which restores cwnd by doubling the slow start threshold.
However, the ssthresh implementations of these algorithms do not halve the
slow start threshold, so the undo can inflate cwnd beyond its pre-loss value.
This is similar to the issue fixed for dctcp in ce6dd23 ("dctcp: avoid
bogus doubling of cwnd after loss").

In light of this it seems better to remove the fallback and make undo_cwnd
mandatory.

The first patch fixes the spots where the reno undo is incorrect by providing
.undo_cwnd implementations; the second patch removes the fallback.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Nov 21, 2016
2 parents 2fcb58a + e979918 commit 9e36ced
Showing 13 changed files with 74 additions and 7 deletions.
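
To make the undo arithmetic concrete, here is a small userspace sketch (illustration only, not kernel code and not part of this commit) of why the old reno fallback inflates cwnd for an algorithm whose ssthresh cuts by less than half. The 1/8 reduction and the helper are hypothetical stand-ins, loosely modeled on tcp_scalable's multiplicative decrease:

#include <stdio.h>

static unsigned int max_u32(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int cwnd_before_loss = 100;
        /* a non-halving ssthresh: cut cwnd by 1/8, roughly what scalable does */
        unsigned int ssthresh = cwnd_before_loss - (cwnd_before_loss >> 3); /* 88 */
        unsigned int cwnd_in_recovery = ssthresh;                           /* 88 */

        /* old fallback: max(cwnd, ssthresh << 1) assumes ssthresh was cwnd/2 */
        unsigned int reno_fallback_undo = max_u32(cwnd_in_recovery, ssthresh << 1);

        /* per-algorithm undo from the first patch: return the snapshot taken
         * in ssthresh(), never more than the window actually in use at loss time
         */
        unsigned int loss_cwnd = cwnd_before_loss;
        unsigned int snapshot_undo = max_u32(cwnd_in_recovery, loss_cwnd);

        /* prints 100, 176 and 100: the fallback overshoots the pre-loss window */
        printf("before loss %u, reno fallback undo %u, snapshot undo %u\n",
               cwnd_before_loss, reno_fallback_undo, snapshot_undo);
        return 0;
}
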
1 change: 1 addition & 0 deletions include/net/tcp.h
@@ -958,6 +958,7 @@ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

 u32 tcp_reno_ssthresh(struct sock *sk);
+u32 tcp_reno_undo_cwnd(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;

14 changes: 12 additions & 2 deletions net/ipv4/tcp_cong.c
@@ -68,8 +68,9 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
 {
         int ret = 0;

-        /* all algorithms must implement ssthresh and cong_avoid ops */
-        if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
+        /* all algorithms must implement these */
+        if (!ca->ssthresh || !ca->undo_cwnd ||
+            !(ca->cong_avoid || ca->cong_control)) {
                 pr_err("%s does not implement required ops\n", ca->name);
                 return -EINVAL;
         }
@@ -441,10 +442,19 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

+u32 tcp_reno_undo_cwnd(struct sock *sk)
+{
+        const struct tcp_sock *tp = tcp_sk(sk);
+
+        return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+}
+EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
+
 struct tcp_congestion_ops tcp_reno = {
         .flags = TCP_CONG_NON_RESTRICTED,
         .name = "reno",
         .owner = THIS_MODULE,
         .ssthresh = tcp_reno_ssthresh,
         .cong_avoid = tcp_reno_cong_avoid,
+        .undo_cwnd = tcp_reno_undo_cwnd,
 };
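
For congestion control module authors, the practical effect of the stricter check above is that .undo_cwnd must be wired up before tcp_register_congestion_control() will succeed. Below is a minimal sketch of a hypothetical out-of-tree module that simply reuses the reno helpers, including the newly exported tcp_reno_undo_cwnd(); the module and its "example_cc" name are illustrative, not part of this series:

#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
        .ssthresh = tcp_reno_ssthresh,
        .cong_avoid = tcp_reno_cong_avoid,
        .undo_cwnd = tcp_reno_undo_cwnd,   /* now mandatory */
        .owner = THIS_MODULE,
        .name = "example_cc",
};

static int __init tcp_example_register(void)
{
        /* fails with -EINVAL if ssthresh, undo_cwnd or cong_avoid were left NULL */
        return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");
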
1 change: 1 addition & 0 deletions net/ipv4/tcp_dctcp.c
@@ -342,6 +342,7 @@ static struct tcp_congestion_ops dctcp __read_mostly = {
 static struct tcp_congestion_ops dctcp_reno __read_mostly = {
         .ssthresh = tcp_reno_ssthresh,
         .cong_avoid = tcp_reno_cong_avoid,
+        .undo_cwnd = tcp_reno_undo_cwnd,
         .get_info = dctcp_get_info,
         .owner = THIS_MODULE,
         .name = "dctcp-reno",
11 changes: 10 additions & 1 deletion net/ipv4/tcp_highspeed.c
@@ -94,6 +94,7 @@ static const struct hstcp_aimd_val {

 struct hstcp {
         u32 ai;
+        u32 loss_cwnd;
 };

 static void hstcp_init(struct sock *sk)
@@ -150,16 +151,24 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static u32 hstcp_ssthresh(struct sock *sk)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
-        const struct hstcp *ca = inet_csk_ca(sk);
+        struct hstcp *ca = inet_csk_ca(sk);

+        ca->loss_cwnd = tp->snd_cwnd;
         /* Do multiplicative decrease */
         return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
 }

+static u32 hstcp_cwnd_undo(struct sock *sk)
+{
+        const struct hstcp *ca = inet_csk_ca(sk);
+
+        return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
 static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
         .init = hstcp_init,
         .ssthresh = hstcp_ssthresh,
+        .undo_cwnd = hstcp_cwnd_undo,
         .cong_avoid = hstcp_cong_avoid,

         .owner = THIS_MODULE,
1 change: 1 addition & 0 deletions net/ipv4/tcp_hybla.c
@@ -166,6 +166,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static struct tcp_congestion_ops tcp_hybla __read_mostly = {
         .init = hybla_init,
         .ssthresh = tcp_reno_ssthresh,
+        .undo_cwnd = tcp_reno_undo_cwnd,
         .cong_avoid = hybla_cong_avoid,
         .set_state = hybla_state,

10 changes: 10 additions & 0 deletions net/ipv4/tcp_illinois.c
@@ -48,6 +48,7 @@ struct illinois {
         u32 end_seq; /* right edge of current RTT */
         u32 alpha; /* Additive increase */
         u32 beta; /* Muliplicative decrease */
+        u32 loss_cwnd; /* cwnd on loss */
         u16 acked; /* # packets acked by current ACK */
         u8 rtt_above; /* average rtt has gone above threshold */
         u8 rtt_low; /* # of rtts measurements below threshold */
@@ -296,10 +297,18 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
         struct tcp_sock *tp = tcp_sk(sk);
         struct illinois *ca = inet_csk_ca(sk);

+        ca->loss_cwnd = tp->snd_cwnd;
         /* Multiplicative decrease */
         return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
 }

+static u32 tcp_illinois_cwnd_undo(struct sock *sk)
+{
+        const struct illinois *ca = inet_csk_ca(sk);
+
+        return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
 /* Extract info for Tcp socket info provided via netlink. */
 static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
                                 union tcp_cc_info *info)
@@ -327,6 +336,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
 static struct tcp_congestion_ops tcp_illinois __read_mostly = {
         .init = tcp_illinois_init,
         .ssthresh = tcp_illinois_ssthresh,
+        .undo_cwnd = tcp_illinois_cwnd_undo,
         .cong_avoid = tcp_illinois_cong_avoid,
         .set_state = tcp_illinois_state,
         .get_info = tcp_illinois_info,
5 changes: 1 addition & 4 deletions net/ipv4/tcp_input.c
@@ -2394,10 +2394,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
         if (tp->prior_ssthresh) {
                 const struct inet_connection_sock *icsk = inet_csk(sk);

-                if (icsk->icsk_ca_ops->undo_cwnd)
-                        tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
-                else
-                        tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+                tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);

                 if (tp->prior_ssthresh > tp->snd_ssthresh) {
                         tp->snd_ssthresh = tp->prior_ssthresh;
1 change: 1 addition & 0 deletions net/ipv4/tcp_lp.c
@@ -316,6 +316,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 static struct tcp_congestion_ops tcp_lp __read_mostly = {
         .init = tcp_lp_init,
         .ssthresh = tcp_reno_ssthresh,
+        .undo_cwnd = tcp_reno_undo_cwnd,
         .cong_avoid = tcp_lp_cong_avoid,
         .pkts_acked = tcp_lp_pkts_acked,

15 changes: 15 additions & 0 deletions net/ipv4/tcp_scalable.c
@@ -15,6 +15,10 @@
 #define TCP_SCALABLE_AI_CNT 50U
 #define TCP_SCALABLE_MD_SCALE 3

+struct scalable {
+        u32 loss_cwnd;
+};
+
 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
         struct tcp_sock *tp = tcp_sk(sk);
@@ -32,12 +36,23 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static u32 tcp_scalable_ssthresh(struct sock *sk)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
+        struct scalable *ca = inet_csk_ca(sk);
+
+        ca->loss_cwnd = tp->snd_cwnd;

         return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
 }

+static u32 tcp_scalable_cwnd_undo(struct sock *sk)
+{
+        const struct scalable *ca = inet_csk_ca(sk);
+
+        return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+}
+
 static struct tcp_congestion_ops tcp_scalable __read_mostly = {
         .ssthresh = tcp_scalable_ssthresh,
+        .undo_cwnd = tcp_scalable_cwnd_undo,
         .cong_avoid = tcp_scalable_cong_avoid,

         .owner = THIS_MODULE,
1 change: 1 addition & 0 deletions net/ipv4/tcp_vegas.c
@@ -307,6 +307,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 static struct tcp_congestion_ops tcp_vegas __read_mostly = {
         .init = tcp_vegas_init,
         .ssthresh = tcp_reno_ssthresh,
+        .undo_cwnd = tcp_reno_undo_cwnd,
         .cong_avoid = tcp_vegas_cong_avoid,
         .pkts_acked = tcp_vegas_pkts_acked,
         .set_state = tcp_vegas_state,
10 changes: 10 additions & 0 deletions net/ipv4/tcp_veno.c
@@ -30,6 +30,7 @@ struct veno {
         u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */
         u32 inc; /* decide whether to increase cwnd */
         u32 diff; /* calculate the diff rate */
+        u32 loss_cwnd; /* cwnd when loss occured */
 };

 /* There are several situations when we must "re-start" Veno:
@@ -193,6 +194,7 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
         const struct tcp_sock *tp = tcp_sk(sk);
         struct veno *veno = inet_csk_ca(sk);

+        veno->loss_cwnd = tp->snd_cwnd;
         if (veno->diff < beta)
                 /* in "non-congestive state", cut cwnd by 1/5 */
                 return max(tp->snd_cwnd * 4 / 5, 2U);
@@ -201,9 +203,17 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
                 return max(tp->snd_cwnd >> 1U, 2U);
 }

+static u32 tcp_veno_cwnd_undo(struct sock *sk)
+{
+        const struct veno *veno = inet_csk_ca(sk);
+
+        return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd);
+}
+
 static struct tcp_congestion_ops tcp_veno __read_mostly = {
         .init = tcp_veno_init,
         .ssthresh = tcp_veno_ssthresh,
+        .undo_cwnd = tcp_veno_cwnd_undo,
         .cong_avoid = tcp_veno_cong_avoid,
         .pkts_acked = tcp_veno_pkts_acked,
         .set_state = tcp_veno_state,
1 change: 1 addition & 0 deletions net/ipv4/tcp_westwood.c
@@ -278,6 +278,7 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
         .init = tcp_westwood_init,
         .ssthresh = tcp_reno_ssthresh,
         .cong_avoid = tcp_reno_cong_avoid,
+        .undo_cwnd = tcp_reno_undo_cwnd,
         .cwnd_event = tcp_westwood_event,
         .in_ack_event = tcp_westwood_ack,
         .get_info = tcp_westwood_info,
10 changes: 10 additions & 0 deletions net/ipv4/tcp_yeah.c
@@ -37,6 +37,7 @@ struct yeah {
         u32 fast_count;

         u32 pkts_acked;
+        u32 loss_cwnd;
 };

 static void tcp_yeah_init(struct sock *sk)
@@ -219,13 +220,22 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)

         yeah->fast_count = 0;
         yeah->reno_count = max(yeah->reno_count>>1, 2U);
+        yeah->loss_cwnd = tp->snd_cwnd;

         return max_t(int, tp->snd_cwnd - reduction, 2);
 }

+static u32 tcp_yeah_cwnd_undo(struct sock *sk)
+{
+        const struct yeah *yeah = inet_csk_ca(sk);
+
+        return max(tcp_sk(sk)->snd_cwnd, yeah->loss_cwnd);
+}
+
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
         .init = tcp_yeah_init,
         .ssthresh = tcp_yeah_ssthresh,
+        .undo_cwnd = tcp_yeah_cwnd_undo,
         .cong_avoid = tcp_yeah_cong_avoid,
         .set_state = tcp_vegas_state,
         .cwnd_event = tcp_vegas_cwnd_event,
