From 7e3b0bfa8df8effcafef37b77c76eccb49d8d683 Mon Sep 17 00:00:00 2001
From: Vlad Yasevich
Date: Fri, 13 Jul 2007 17:01:19 -0400
Subject: [PATCH]

--- yaml ---
r: 64594
b: refs/heads/master
c: ea2dfb3733d53ac98b17756435d1f99e25490357
h: refs/heads/master
v: v3
---

 [refs]                            |  2 +-
 trunk/drivers/net/bnx2.c          | 10 ++---
 trunk/include/linux/rtnetlink.h   |  2 -
 trunk/include/net/sctp/ulpqueue.h |  1 +
 trunk/net/core/pktgen.c           |  5 +--
 trunk/net/ipv4/tcp_input.c        | 14 +-----
 trunk/net/sctp/sm_sideeffect.c    |  3 ++
 trunk/net/sctp/ulpqueue.c         | 75 +++++++++++++++++++++++++------
 8 files changed, 75 insertions(+), 37 deletions(-)

diff --git a/[refs] b/[refs]
index b5493df40a27..26c89421f7cb 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 05bb1fad1cde025a864a90cfeb98dcbefe78a44a
+refs/heads/master: ea2dfb3733d53ac98b17756435d1f99e25490357
diff --git a/trunk/drivers/net/bnx2.c b/trunk/drivers/net/bnx2.c
index 854d80c330ec..24e7f9ab3f5a 100644
--- a/trunk/drivers/net/bnx2.c
+++ b/trunk/drivers/net/bnx2.c
@@ -3934,13 +3934,11 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	/* Chip reset. */
 	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
-	/* Reading back any register after chip reset will hang the
-	 * bus on 5706 A0 and A1.  The msleep below provides plenty
-	 * of margin for write posting.
-	 */
 	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
-		msleep(20);
+	    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+		current->state = TASK_UNINTERRUPTIBLE;
+		schedule_timeout(HZ / 50);
+	}
 
 	/* Reset takes approximate 30 usec */
 	for (i = 0; i < 10; i++) {
diff --git a/trunk/include/linux/rtnetlink.h b/trunk/include/linux/rtnetlink.h
index dff3192374f8..c91476ce314a 100644
--- a/trunk/include/linux/rtnetlink.h
+++ b/trunk/include/linux/rtnetlink.h
@@ -351,8 +351,6 @@ enum
 #define RTAX_INITCWND RTAX_INITCWND
 	RTAX_FEATURES,
 #define RTAX_FEATURES RTAX_FEATURES
-	RTAX_RTO_MIN,
-#define RTAX_RTO_MIN RTAX_RTO_MIN
 	__RTAX_MAX
 };
 
diff --git a/trunk/include/net/sctp/ulpqueue.h b/trunk/include/net/sctp/ulpqueue.h
index 39ea3f442b47..cd33270e86dd 100644
--- a/trunk/include/net/sctp/ulpqueue.h
+++ b/trunk/include/net/sctp/ulpqueue.h
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 
 #endif /* __sctp_ulpqueue_h__ */
diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c
index 4ad62d375373..7bae576ac115 100644
--- a/trunk/net/core/pktgen.c
+++ b/trunk/net/core/pktgen.c
@@ -3331,9 +3331,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	}
 
 	if ((netif_queue_stopped(odev) ||
-	     (pkt_dev->skb &&
-	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
-	    need_resched()) {
+	     netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
+	     need_resched()) {
 		idle_start = getCurUs();
 
 		if (!netif_running(odev)) {
diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c
index 1ee72127462b..9785df37a65f 100644
--- a/trunk/net/ipv4/tcp_input.c
+++ b/trunk/net/ipv4/tcp_input.c
@@ -555,16 +555,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		tcp_grow_window(sk, skb);
 }
 
-static u32 tcp_rto_min(struct sock *sk)
-{
-	struct dst_entry *dst = __sk_dst_get(sk);
-	u32 rto_min = TCP_RTO_MIN;
-
-	if (dst_metric_locked(dst, RTAX_RTO_MIN))
-		rto_min = dst->metrics[RTAX_RTO_MIN-1];
-	return rto_min;
-}
-
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -626,13 +616,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 			if (tp->mdev_max < tp->rttvar)
 				tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
 			tp->rtt_seq = tp->snd_nxt;
-			tp->mdev_max = tcp_rto_min(sk);
+			tp->mdev_max = TCP_RTO_MIN;
 		}
 	} else {
 		/* no previous measure. */
 		tp->srtt = m<<3;	/* take the measured time to be rtt */
 		tp->mdev = m<<1;	/* make sure rto = 3*rtt */
-		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
 		tp->rtt_seq = tp->snd_nxt;
 	}
 }
diff --git a/trunk/net/sctp/sm_sideeffect.c b/trunk/net/sctp/sm_sideeffect.c
index d9fad4f6ffc3..1907318e70f1 100644
--- a/trunk/net/sctp/sm_sideeffect.c
+++ b/trunk/net/sctp/sm_sideeffect.c
@@ -1130,6 +1130,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			/* Move the Cumulattive TSN Ack ahead. */
 			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+			/* purge the fragmentation queue */
+			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
 			/* Abort any in progress partial delivery. */
 			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 			break;
diff --git a/trunk/net/sctp/ulpqueue.c b/trunk/net/sctp/ulpqueue.c
index 34eb977a204d..fa0ba2a5564e 100644
--- a/trunk/net/sctp/ulpqueue.c
+++ b/trunk/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 	return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less then
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;
 
-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far?  */
+		if (csid > sid)
 			break;
 
-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough? */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;
 
 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on. */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event. */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event. */
+		__skb_queue_tail(&temp, pos);
 	}
 
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forwared TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
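
Postscript, for readers who want the core of the SCTP change in isolation: the sketch
below is NOT kernel code. tsn_lte(), struct frag and flush_upto() are hypothetical
stand-ins for the kernel's TSN_lte() macro and the sk_buff-based reassembly queue
walked by sctp_ulpq_reasm_flushtsn(). It only illustrates why serial-number
arithmetic (RFC 1982 style) keeps the "free every fragment at or before the new
cumulative TSN point" walk correct even when the 32-bit TSN space wraps around.

/* tsn_flush_sketch.c -- standalone illustration, not kernel code */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* "a <= b" in serial-number arithmetic: correct modulo 2^32 as long
 * as the two values are within 2^31 of each other, which SCTP's
 * window rules guarantee.  Mimics the kernel's TSN_lte().
 */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

struct frag {
	uint32_t tsn;
	struct frag *next;
};

/* Free every fragment whose TSN is at or before fwd_tsn, as Forward
 * TSN processing requires (RFC 3758, Section 3.6).  The kernel version
 * stops at the first newer fragment because the reasm queue is kept
 * TSN-ordered; this sketch keeps walking so an unsorted list also works.
 */
static void flush_upto(struct frag **head, uint32_t fwd_tsn)
{
	struct frag **pp = head;

	while (*pp) {
		if (tsn_lte((*pp)->tsn, fwd_tsn)) {
			struct frag *dead = *pp;
			*pp = dead->next;
			free(dead);
		} else {
			pp = &(*pp)->next;
		}
	}
}

int main(void)
{
	/* Fragment TSNs straddling the 32-bit wrap point. */
	uint32_t tsns[] = { 0xfffffffeu, 0xffffffffu, 0u, 1u, 2u };
	struct frag *head = NULL, **tail = &head;
	size_t i;

	for (i = 0; i < sizeof(tsns) / sizeof(tsns[0]); i++) {
		struct frag *f = malloc(sizeof(*f));
		f->tsn = tsns[i];
		f->next = NULL;
		*tail = f;
		tail = &f->next;
	}

	/* New cumulative TSN point is 0, i.e. just past the wrap:
	 * 0xfffffffe, 0xffffffff and 0 are flushed; 1 and 2 survive.
	 * A plain "tsn <= 0" comparison would instead flush only 0.
	 */
	flush_upto(&head, 0);

	while (head) {
		struct frag *f = head;
		printf("kept TSN %u\n", (unsigned)f->tsn);
		head = f->next;
		free(f);
	}
	return 0;
}

Compiled with any C99 compiler and run, this prints "kept TSN 1" and "kept TSN 2",
which is why the patch uses TSN_lte() rather than an ordinary integer comparison
when purging the fragmentation queue.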