---
yaml
---
r: 64594
b: refs/heads/master
c: ea2dfb3
h: refs/heads/master
v: v3
Vlad Yasevich committed Aug 29, 2007
1 parent 6e58854 commit 7e3b0bf
Showing 8 changed files with 75 additions and 37 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 05bb1fad1cde025a864a90cfeb98dcbefe78a44a
+refs/heads/master: ea2dfb3733d53ac98b17756435d1f99e25490357
10 changes: 4 additions & 6 deletions trunk/drivers/net/bnx2.c
@@ -3934,13 +3934,11 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
         /* Chip reset. */
         REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
-        /* Reading back any register after chip reset will hang the
-         * bus on 5706 A0 and A1. The msleep below provides plenty
-         * of margin for write posting.
-         */
         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-            (CHIP_ID(bp) == CHIP_ID_5706_A1))
-                msleep(20);
+            (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+                current->state = TASK_UNINTERRUPTIBLE;
+                schedule_timeout(HZ / 50);
+        }
 
         /* Reset takes approximately 30 usec */
         for (i = 0; i < 10; i++) {
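Note: the hunk above trades a msleep() call for an open-coded uninterruptible sleep of HZ/50 jiffies; both park the task for roughly 20 ms while the 5706 A0/A1 chips finish write posting after reset. A minimal sketch of the two idioms, assuming a 2.6-era kernel context where sleeping is allowed; the helper names are illustrative, not driver code:

#include <linux/delay.h>
#include <linux/sched.h>

static void reset_settle_msleep(void)
{
        /* Sleeps at least 20 ms; task state is handled internally. */
        msleep(20);
}

static void reset_settle_schedule_timeout(void)
{
        /* Open-coded equivalent: mark the task uninterruptible so
         * signals cannot cut the delay short, then sleep HZ/50
         * jiffies (about 20 ms).
         */
        current->state = TASK_UNINTERRUPTIBLE;
        schedule_timeout(HZ / 50);
}

msleep() rounds the delay up to jiffy resolution, so on a low-HZ kernel it can sleep slightly longer than the schedule_timeout() form; as a post-reset settle delay, either provides enough margin.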
2 changes: 0 additions & 2 deletions trunk/include/linux/rtnetlink.h
@@ -351,8 +351,6 @@ enum
 #define RTAX_INITCWND RTAX_INITCWND
         RTAX_FEATURES,
 #define RTAX_FEATURES RTAX_FEATURES
-        RTAX_RTO_MIN,
-#define RTAX_RTO_MIN RTAX_RTO_MIN
         __RTAX_MAX
 };

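This header is consumed by userspace, and the enum-plus-#define pattern exists so each RTAX_* metric can be feature-tested with the preprocessor; dropping RTAX_RTO_MIN here removes it from that userspace-visible set. A small hypothetical consumer sketching the probe idiom (the probe is standard practice, not something this commit adds):

#include <sys/socket.h>
#include <linux/rtnetlink.h>
#include <stdio.h>

int main(void)
{
#ifdef RTAX_FEATURES
        /* The self-referential define makes the enumerator visible
         * to #ifdef, so newer metrics can be used conditionally.
         */
        printf("RTAX_FEATURES = %d\n", RTAX_FEATURES);
#else
        printf("headers predate RTAX_FEATURES\n");
#endif
        return 0;
}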
1 change: 1 addition & 0 deletions trunk/include/net/sctp/ulpqueue.h
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 #endif /* __sctp_ulpqueue_h__ */


5 changes: 2 additions & 3 deletions trunk/net/core/pktgen.c
@@ -3331,9 +3331,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
         }
 
         if ((netif_queue_stopped(odev) ||
-             (pkt_dev->skb &&
-              netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
-            need_resched()) {
+             netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
+             need_resched()) {
                 idle_start = getCurUs();
 
                 if (!netif_running(odev)) {
14 changes: 2 additions & 12 deletions trunk/net/ipv4/tcp_input.c
@@ -555,16 +555,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
         tcp_grow_window(sk, skb);
 }
 
-static u32 tcp_rto_min(struct sock *sk)
-{
-        struct dst_entry *dst = __sk_dst_get(sk);
-        u32 rto_min = TCP_RTO_MIN;
-
-        if (dst_metric_locked(dst, RTAX_RTO_MIN))
-                rto_min = dst->metrics[RTAX_RTO_MIN-1];
-        return rto_min;
-}
-
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -626,13 +616,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                         if (tp->mdev_max < tp->rttvar)
                                 tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
                         tp->rtt_seq = tp->snd_nxt;
-                        tp->mdev_max = tcp_rto_min(sk);
+                        tp->mdev_max = TCP_RTO_MIN;
                 }
         } else {
                 /* no previous measure. */
                 tp->srtt = m<<3;        /* take the measured time to be rtt */
                 tp->mdev = m<<1;        /* make sure rto = 3*rtt */
-                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+                tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
                 tp->rtt_seq = tp->snd_nxt;
         }
 }
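With the per-route tcp_rto_min() helper removed, the estimator is pinned back to the constant TCP_RTO_MIN. The shifts in this hunk are fixed-point bookkeeping: tp->srtt stores 8x the smoothed RTT and tp->mdev stores 4x the mean deviation, so a first sample m yields rto = (srtt >> 3) + rttvar = m + 2m, matching the "make sure rto = 3*rtt" comment. A standalone demo of that arithmetic, with TCP_RTO_MIN assumed to be 200 ms purely for illustration (the kernel defines it in jiffies):

#include <stdio.h>

#define DEMO_RTO_MIN 200        /* assumed: TCP_RTO_MIN expressed in ms */

int main(void)
{
        unsigned int m = 100;           /* first RTT sample, in ms */
        unsigned int srtt = m << 3;     /* stored as 8 * SRTT */
        unsigned int mdev = m << 1;     /* stored as 4 * MDEV, so MDEV = m/2 */
        unsigned int rttvar = mdev > DEMO_RTO_MIN ? mdev : DEMO_RTO_MIN;
        unsigned int rto = (srtt >> 3) + rttvar;

        printf("rto = %u ms (3 * rtt = %u ms)\n", rto, 3 * m);
        return 0;
}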
3 changes: 3 additions & 0 deletions trunk/net/sctp/sm_sideeffect.c
@@ -1130,6 +1130,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                 /* Move the Cumulative TSN Ack ahead. */
                 sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+                /* purge the fragmentation queue */
+                sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
                 /* Abort any in-progress partial delivery. */
                 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
                 break;
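The new call sits between advancing the cumulative TSN map and aborting partial delivery, so stranded fragments are freed before the ULP is told the partially delivered message will never complete. A hypothetical condensation of that ordering (the real code lives inside sctp_cmd_interpreter()'s command loop and assumes the SCTP-internal headers):

/* Illustrative only: the three cleanup steps a Forward TSN triggers,
 * pulled out of the command interpreter for readability.
 */
static void fwdtsn_cleanup(struct sctp_association *asoc, __u32 ctsn)
{
        /* 1. Move the Cumulative TSN Ack point past the abandoned TSNs. */
        sctp_tsnmap_skip(&asoc->peer.tsn_map, ctsn);

        /* 2. Purge reassembly fragments at or below the new point. */
        sctp_ulpq_reasm_flushtsn(&asoc->ulpq, ctsn);

        /* 3. Abort any in-progress partial delivery. */
        sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
}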
75 changes: 62 additions & 13 deletions trunk/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
         return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+        struct sk_buff *pos, *tmp;
+        struct sctp_ulpevent *event;
+        __u32 tsn;
+
+        if (skb_queue_empty(&ulpq->reasm))
+                return;
+
+        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+                event = sctp_skb2event(pos);
+                tsn = event->tsn;
+
+                /* Since the entire message must be abandoned by the
+                 * sender (item A3 in Section 3.5, RFC 3758), we can
+                 * free all fragments on the list that are less than
+                 * or equal to ctsn_point.
+                 */
+                if (TSN_lte(tsn, fwd_tsn)) {
+                        __skb_unlink(pos, &ulpq->reasm);
+                        sctp_ulpevent_free(event);
+                } else
+                        break;
+        }
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
         struct sk_buff *pos, *tmp;
         struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
                 csid = cevent->stream;
                 cssn = cevent->ssn;
 
-                if (cssn != sctp_ssn_peek(in, csid))
+                /* Have we gone too far? */
+                if (csid > sid)
                         break;
 
-                /* Found it, so mark in the ssnmap. */
-                sctp_ssn_next(in, csid);
+                /* Have we not gone far enough? */
+                if (csid < sid)
+                        continue;
+
+                /* see if this ssn has been marked by skipping */
+                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+                        break;
 
                 __skb_unlink(pos, &ulpq->lobby);
-                if (!event) {
+                if (!event)
                         /* Create a temporary list to collect chunks on. */
                         event = sctp_skb2event(pos);
-                        __skb_queue_tail(&temp, sctp_event2skb(event));
-                } else {
-                        /* Attach all gathered skbs to the event. */
-                        __skb_queue_tail(&temp, pos);
-                }
+
+                /* Attach all gathered skbs to the event. */
+                __skb_queue_tail(&temp, pos);
         }
 
         /* Send event to the ULP. 'event' is the sctp_ulpevent for the
          * very first SKB on the 'temp' list.
          */
-        if (event)
+        if (event) {
+                /* see if we have more ordered data that we can deliver */
+                sctp_ulpq_retrieve_ordered(ulpq, event);
                 sctp_ulpq_tail_event(ulpq, event);
+        }
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of a
+ * Forward TSN chunk to skip over the abandoned ordered data.
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
         struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
         /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
-        sctp_ulpq_reap_ordered(ulpq);
+        sctp_ulpq_reap_ordered(ulpq, sid);
         return;
 }

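Both TSN_lte() in the new flush function and SSN_lt() in the reordering path compare sequence numbers with serial-number arithmetic, so the checks stay correct when the 32-bit TSN (or 16-bit SSN) wraps past zero. A self-contained sketch of the trick, assumed to mirror the kernel's helpers in include/net/sctp/sm.h:

#include <stdio.h>

/* Serial-number "less than or equal": subtract in the unsigned
 * domain and test the sign bit, so a value that recently wrapped
 * still compares as earlier.
 */
static int tsn_lte(unsigned int s, unsigned int t)
{
        return s == t || ((s - t) & (1u << 31));
}

int main(void)
{
        printf("%d\n", tsn_lte(5, 10));                 /* 1: plainly earlier */
        printf("%d\n", tsn_lte(0xfffffffeu, 3));        /* 1: earlier across the wrap */
        printf("%d\n", tsn_lte(10, 5));                 /* 0: later */
        return 0;
}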
