Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 78131
b: refs/heads/master
c: 68f8353
h: refs/heads/master
i:
  78129: b1dfbe5
  78127: 8ea7a42
v: v3
  • Loading branch information
Ilpo Järvinen authored and David S. Miller committed Jan 28, 2008
1 parent dfcf6c3 commit bc3949f
Show file tree
Hide file tree
Showing 5 changed files with 173 additions and 118 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: fd6dad616d4fe2f08d690f25ca76b0102158fb3a
refs/heads/master: 68f8353b480e5f2e136c38a511abdbb88eaa8ce2
3 changes: 0 additions & 3 deletions trunk/include/linux/tcp.h
Original file line number Diff line number Diff line change
Expand Up @@ -343,10 +343,7 @@ struct tcp_sock {
struct sk_buff *scoreboard_skb_hint;
struct sk_buff *retransmit_skb_hint;
struct sk_buff *forward_skb_hint;
struct sk_buff *fastpath_skb_hint;

int fastpath_cnt_hint; /* Lags behind by current skb's pcount
* compared to respective fackets_out */
int lost_cnt_hint;
int retransmit_cnt_hint;

Expand Down
1 change: 0 additions & 1 deletion trunk/include/net/tcp.h
Original file line number Diff line number Diff line change
Expand Up @@ -1081,7 +1081,6 @@ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
tcp_clear_retrans_hints_partial(tp);
tp->fastpath_skb_hint = NULL;
}

/* MD5 Signature */
Expand Down
271 changes: 171 additions & 100 deletions trunk/net/ipv4/tcp_input.c
Original file line number Diff line number Diff line change
Expand Up @@ -1333,6 +1333,88 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp,
return flag;
}

static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
					struct tcp_sack_block *next_dup,
					u32 start_seq, u32 end_seq,
					int dup_sack_in, int *fack_count,
					int *reord, int *flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_for_write_queue_from(skb, sk) {
		int match = 0;
		int is_dsack = dup_sack_in;

		if (skb == tcp_send_head(sk))
			break;

		/* The write queue is sequence ordered, so once this skb
		 * starts at or past end_seq nothing further can match.
		 */
		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
			break;

		/* A pending DSACK block may overlap this skb; try it first
		 * so a hit gets accounted as a duplicate SACK.
		 */
		if (next_dup != NULL &&
		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
			match = tcp_match_skb_to_sack(sk, skb,
						      next_dup->start_seq,
						      next_dup->end_seq);
			if (match > 0)
				is_dsack = 1;
		}

		/* No DSACK hit: fall back to the plain SACK block.  A
		 * negative result signals an error from the match helper;
		 * abort the walk in that case.
		 */
		if (match <= 0)
			match = tcp_match_skb_to_sack(sk, skb, start_seq,
						      end_seq);
		if (unlikely(match < 0))
			break;

		if (match)
			*flag |= tcp_sacktag_one(skb, tp, reord, is_dsack,
						 *fack_count);

		*fack_count += tcp_skb_pcount(skb);
	}
	return skb;
}

/* Fast-forward over skbs that lie entirely below skip_to_seq, avoiding
 * all of the extra per-skb work sacktag does on a normal walk.
 * Returns the first skb that reaches (or crosses) skip_to_seq, or the
 * send head if the whole queue is below it.
 */
static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
					u32 skip_to_seq)
{
	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;

		/* Stop at the first skb that is not wholly below the skip
		 * target.  Breaking on before(end_seq, skip_to_seq) — the
		 * old test — bailed out on the very first not-yet-reached
		 * skb and therefore skipped nothing at all.
		 */
		if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
			break;
	}
	return skb;
}

static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
						struct sock *sk,
						struct tcp_sack_block *next_dup,
						u32 skip_to_seq,
						int *fack_count, int *reord,
						int *flag)
{
	/* Only a DSACK block that begins below the skip point needs to be
	 * tagged while we fast-forward past already-handled data; anything
	 * else is left for the ordinary walk.
	 */
	if (next_dup != NULL && before(next_dup->start_seq, skip_to_seq)) {
		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
		tcp_sacktag_walk(skb, sk, NULL, next_dup->start_seq,
				 next_dup->end_seq, 1, fack_count, reord,
				 flag);
	}

	return skb;
}

/* True while @cache still points inside tp->recv_sack_cache[]. */
static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
{
	return cache < &tp->recv_sack_cache[ARRAY_SIZE(tp->recv_sack_cache)];
}

static int
tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
{
Expand All @@ -1342,16 +1424,16 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
TCP_SKB_CB(ack_skb)->sacked);
struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
struct tcp_sack_block sp[4];
struct sk_buff *cached_skb;
struct tcp_sack_block *cache;
struct sk_buff *skb;
int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
int used_sacks;
int reord = tp->packets_out;
int flag = 0;
int found_dup_sack = 0;
int cached_fack_count;
int i;
int fack_count;
int i, j;
int first_sack_index;
int force_one_sack;

if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
Expand Down Expand Up @@ -1409,132 +1491,123 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
used_sacks++;
}

/* SACK fastpath:
* if the only SACK change is the increase of the end_seq of
* the first block then only apply that SACK block
* and use retrans queue hinting otherwise slowpath */
force_one_sack = 1;
for (i = 0; i < used_sacks; i++) {
u32 start_seq = sp[i].start_seq;
u32 end_seq = sp[i].end_seq;

if (i == 0) {
if (tp->recv_sack_cache[i].start_seq != start_seq)
force_one_sack = 0;
} else {
if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
(tp->recv_sack_cache[i].end_seq != end_seq))
force_one_sack = 0;
}
tp->recv_sack_cache[i].start_seq = start_seq;
tp->recv_sack_cache[i].end_seq = end_seq;
}
/* Clear the rest of the cache sack blocks so they won't match mistakenly. */
for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
tp->recv_sack_cache[i].start_seq = 0;
tp->recv_sack_cache[i].end_seq = 0;
}
/* order SACK blocks to allow in order walk of the retrans queue */
for (i = used_sacks - 1; i > 0; i--) {
for (j = 0; j < i; j++){
if (after(sp[j].start_seq, sp[j+1].start_seq)) {
struct tcp_sack_block tmp;

if (force_one_sack)
used_sacks = 1;
else {
int j;
tp->fastpath_skb_hint = NULL;

/* order SACK blocks to allow in order walk of the retrans queue */
for (i = used_sacks - 1; i > 0; i--) {
for (j = 0; j < i; j++){
if (after(sp[j].start_seq, sp[j+1].start_seq)) {
struct tcp_sack_block tmp;

tmp = sp[j];
sp[j] = sp[j+1];
sp[j+1] = tmp;

/* Track where the first SACK block goes to */
if (j == first_sack_index)
first_sack_index = j+1;
}
tmp = sp[j];
sp[j] = sp[j+1];
sp[j+1] = tmp;

/* Track where the first SACK block goes to */
if (j == first_sack_index)
first_sack_index = j+1;
}
}
}

/* Use SACK fastpath hint if valid */
cached_skb = tp->fastpath_skb_hint;
cached_fack_count = tp->fastpath_cnt_hint;
if (!cached_skb) {
cached_skb = tcp_write_queue_head(sk);
cached_fack_count = 0;
skb = tcp_write_queue_head(sk);
fack_count = 0;
i = 0;

if (!tp->sacked_out) {
/* It's already past, so skip checking against it */
cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
} else {
cache = tp->recv_sack_cache;
/* Skip empty blocks at the head of the cache */
while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
!cache->end_seq)
cache++;
}

for (i = 0; i < used_sacks; i++) {
struct sk_buff *skb;
while (i < used_sacks) {
u32 start_seq = sp[i].start_seq;
u32 end_seq = sp[i].end_seq;
int fack_count;
int dup_sack = (found_dup_sack && (i == first_sack_index));
int next_dup = (found_dup_sack && (i+1 == first_sack_index));
struct tcp_sack_block *next_dup = NULL;

skb = cached_skb;
fack_count = cached_fack_count;
if (found_dup_sack && ((i + 1) == first_sack_index))
next_dup = &sp[i + 1];

/* Event "B" in the comment above. */
if (after(end_seq, tp->high_seq))
flag |= FLAG_DATA_LOST;

tcp_for_write_queue_from(skb, sk) {
int in_sack = 0;

if (skb == tcp_send_head(sk))
break;

cached_skb = skb;
cached_fack_count = fack_count;
if (i == first_sack_index) {
tp->fastpath_skb_hint = skb;
tp->fastpath_cnt_hint = fack_count;
/* Skip too early cached blocks */
while (tcp_sack_cache_ok(tp, cache) &&
!before(start_seq, cache->end_seq))
cache++;

/* Can skip some work by looking recv_sack_cache? */
if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
after(end_seq, cache->start_seq)) {

/* Head todo? */
if (before(start_seq, cache->start_seq)) {
skb = tcp_sacktag_skip(skb, sk, start_seq);
skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq,
cache->start_seq, dup_sack,
&fack_count, &reord, &flag);
}

/* The retransmission queue is always in order, so
* we can short-circuit the walk early.
*/
if (!before(TCP_SKB_CB(skb)->seq, end_seq))
break;

dup_sack = (found_dup_sack && (i == first_sack_index));
/* Rest of the block already fully processed? */
if (!after(end_seq, cache->end_seq)) {
skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
&fack_count, &reord, &flag);
goto advance_sp;
}

/* Due to sorting DSACK may reside within this SACK block! */
if (next_dup) {
u32 dup_start = sp[i+1].start_seq;
u32 dup_end = sp[i+1].end_seq;
/* ...tail remains todo... */
if (TCP_SKB_CB(tp->highest_sack)->end_seq == cache->end_seq) {
/* ...but better entrypoint exists! Check that DSACKs are
* properly accounted while skipping here
*/
tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
&fack_count, &reord, &flag);

if (before(TCP_SKB_CB(skb)->seq, dup_end)) {
in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end);
if (in_sack > 0)
dup_sack = 1;
}
skb = tcp_write_queue_next(sk, tp->highest_sack);
fack_count = tp->fackets_out;
cache++;
goto walk;
}

/* DSACK info lost if out-of-mem, try SACK still */
if (in_sack <= 0)
in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
if (unlikely(in_sack < 0))
break;

if (in_sack)
flag |= tcp_sacktag_one(skb, tp, &reord, dup_sack, fack_count);
skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
/* Check overlap against next cached too (past this one already) */
cache++;
continue;
}

fack_count += tcp_skb_pcount(skb);
if (!before(start_seq, tcp_highest_sack_seq(tp))) {
skb = tcp_write_queue_next(sk, tp->highest_sack);
fack_count = tp->fackets_out;
}
skb = tcp_sacktag_skip(skb, sk, start_seq);

walk:
skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
dup_sack, &fack_count, &reord, &flag);

advance_sp:
/* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
* due to in-order walk
*/
if (after(end_seq, tp->frto_highmark))
flag &= ~FLAG_ONLY_ORIG_SACKED;

i++;
}

/* Clear the head of the cache sack blocks so we can skip it next time */
for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
tp->recv_sack_cache[i].start_seq = 0;
tp->recv_sack_cache[i].end_seq = 0;
}
for (j = 0; j < used_sacks; j++)
tp->recv_sack_cache[i++] = sp[j];

flag |= tcp_mark_lost_retrans(sk);

tcp_verify_left_out(tp);
Expand Down Expand Up @@ -2821,9 +2894,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p,
}

tp->fackets_out -= min(pkts_acked, tp->fackets_out);
/* hint's skb might be NULL but we don't need to care */
tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
tp->fastpath_cnt_hint);

if (ca_ops->pkts_acked) {
s32 rtt_us = -1;

Expand Down
14 changes: 1 addition & 13 deletions trunk/net/ipv4/tcp_output.c
Original file line number Diff line number Diff line change
Expand Up @@ -653,9 +653,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
}

/* When a modification to fackets out becomes necessary, we need to check
* skb is counted to fackets_out or not. Another important thing is to
* tweak SACK fastpath hint too as it would overwrite all changes unless
* hint is also changed.
* skb is counted to fackets_out or not.
*/
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
int decr)
Expand All @@ -667,11 +665,6 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,

if (!before(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
tp->fackets_out -= decr;

/* cnt_hint is "off-by-one" compared with fackets_out (see sacktag) */
if (tp->fastpath_skb_hint != NULL &&
after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
tp->fastpath_cnt_hint -= decr;
}

/* Function to create two new TCP segments. Shrinks the given segment
Expand Down Expand Up @@ -1753,11 +1746,6 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m

/* changed transmit queue under us so clear hints */
tcp_clear_retrans_hints_partial(tp);
/* manually tune sacktag skb hint */
if (tp->fastpath_skb_hint == next_skb) {
tp->fastpath_skb_hint = skb;
tp->fastpath_cnt_hint -= tcp_skb_pcount(skb);
}

sk_stream_free_skb(sk, next_skb);
}
Expand Down

0 comments on commit bc3949f

Please sign in to comment.