Commit

---
yaml
---
r: 360783
b: refs/heads/master
c: d003b41
h: refs/heads/master
i:
  360781: a21dfa6
  360779: c8fc876
  360775: 039814c
  360767: cd9e66b
v: v3
Lee A. Roberts authored and David S. Miller committed Feb 28, 2013
1 parent 9338311 commit 159e91f
Showing 2 changed files with 44 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 95ac7b859f508b1b3e6adf7dce307864e4384a69
+refs/heads/master: d003b41b801124b96337973b01eada6a83673d23
54 changes: 43 additions & 11 deletions trunk/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP. */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
@@ -1025,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
@@ -1077,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
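For orientation, the sketch below is a standalone toy model of the control flow the hunks above establish; it is not part of this commit and not kernel code. It only mirrors the shape of the logic: sctp_ulpq_tail_data() now reports through its return value whether a complete message (MSG_EOR) reached the user, sctp_ulpq_partial_delivery() only starts once the first queued fragment is at or below the cumulative TSN ACK point, and a fully delivered message drains the reassembly queue instead. All toy_* names, and the serial-number comparison standing in for TSN_lte(), are illustrative assumptions rather than kernel APIs.

/* Toy model of the post-patch decision flow; all toy_* names are
 * hypothetical and only mimic the shape of the kernel logic.
 */
#include <stdio.h>

typedef unsigned int u32;

/* Assumed wraparound-safe serial-number comparison, standing in for
 * the kernel's TSN_lte(): true if tsn is at or below cum_ack.
 */
static int toy_tsn_at_or_below(u32 tsn, u32 cum_ack)
{
	return (int)(tsn - cum_ack) <= 0;
}

/* Stand-in for sctp_ulpq_tail_data() after the patch: returns 1 when an
 * event carrying MSG_EOR (a complete message) was handed to the ULP,
 * 0 when data was only queued for reassembly/ordering.
 */
static int toy_tail_data(int delivered_full_message)
{
	return delivered_full_message ? 1 : 0;
}

/* Mirrors the reworked branch in sctp_ulpq_renege(). */
static void toy_renege(u32 first_reasm_tsn, u32 cum_ack, int full_msg)
{
	int retval = toy_tail_data(full_msg);

	if (retval <= 0) {
		/* Partial delivery only starts when the first fragment on
		 * the reassembly queue is covered by the cumulative TSN
		 * ACK point.
		 */
		if (toy_tsn_at_or_below(first_reasm_tsn, cum_ack))
			printf("enter partial delivery\n");
		else
			printf("hold off: TSN %u above cum-ack %u\n",
			       first_reasm_tsn, cum_ack);
	} else if (retval == 1) {
		printf("complete message delivered: drain reassembly queue\n");
	}
}

int main(void)
{
	toy_renege(100, 105, 0);	/* covered by cum-ack -> partial delivery */
	toy_renege(110, 105, 0);	/* above cum-ack      -> do not start PD  */
	toy_renege(100, 105, 1);	/* MSG_EOR delivered  -> drain the queue  */
	return 0;
}

Compiling and running the sketch prints the three branch outcomes in the same order the reworked renege path evaluates them.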
