Commit 4bede83
Ilpo Järvinen authored and David S. Miller committed Nov 25, 2008
1 parent 20a60dc commit 4bede83
Showing 5 changed files with 428 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f58b22fd3c16444edc393a217a74208f1894b601
+refs/heads/master: 832d11c5cd076abc0aa1eaf7be96c81d1a59ce41
33 changes: 33 additions & 0 deletions trunk/include/linux/skbuff.h
@@ -492,6 +492,19 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
return (skb->next == (struct sk_buff *) list);
}

/**
* skb_queue_is_first - check if skb is the first entry in the queue
* @list: queue head
* @skb: buffer
*
* Returns true if @skb is the first buffer on the list.
*/
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return (skb->prev == (struct sk_buff *) list);
}

/**
* skb_queue_next - return the next packet in the queue
* @list: queue head
@@ -510,6 +523,24 @@ static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
return skb->next;
}

/**
* skb_queue_prev - return the previous packet in the queue
* @list: queue head
* @skb: current buffer
*
* Return the previous packet in @list before @skb. It is only valid to
* call this if skb_queue_is_first() evaluates to false.
*/
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
/* This BUG_ON may seem severe, but if we just return then we
* are going to dereference garbage.
*/
BUG_ON(skb_queue_is_first(list, skb));
return skb->prev;
}
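
Together with skb_queue_is_first() above, these helpers allow walking a queue from tail to head without touching the sk_buff_head sentinel directly. A minimal sketch of such a walk (walk_queue_backwards() and the visit() callback are illustrative, not part of this patch; skb_peek_tail() is a long-standing helper, and the skb_queue_is_first()/skb_queue_prev() pair is what this commit adds):

    /* Walk @list from tail to head, calling visit() on each buffer. */
    static void walk_queue_backwards(struct sk_buff_head *list,
                                     void (*visit)(struct sk_buff *skb))
    {
        struct sk_buff *skb = skb_peek_tail(list);

        while (skb) {
            visit(skb);
            if (skb_queue_is_first(list, skb))
                break;  /* at the head; skb_queue_prev() would BUG */
            skb = skb_queue_prev(list, skb);
        }
    }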

/**
* skb_get - reference buffer
* @skb: buffer to reference
@@ -1652,6 +1683,8 @@ extern int skb_splice_bits(struct sk_buff *skb,
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

5 changes: 5 additions & 0 deletions trunk/include/net/tcp.h
@@ -1192,6 +1192,11 @@ static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
return skb_queue_prev(&sk->sk_write_queue, skb);
}
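
Like skb_queue_prev(), this wrapper must never be called on the queue head. A hedged sketch of the guard a caller needs, using the existing tcp_write_queue_head() accessor (tcp_prev_or_null() is an illustrative name, not part of this patch):

    /* Return the previous skb on the write queue, or NULL when @skb
     * is already the head and has no predecessor.
     */
    static inline struct sk_buff *tcp_prev_or_null(struct sock *sk,
                                                   struct sk_buff *skb)
    {
        if (skb == tcp_write_queue_head(sk))
            return NULL;
        return tcp_write_queue_prev(sk, skb);
    }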

#define tcp_for_write_queue(skb, sk) \
skb_queue_walk(&(sk)->sk_write_queue, skb)

140 changes: 140 additions & 0 deletions trunk/net/core/skbuff.c
@@ -2018,6 +2018,146 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
skb_split_no_header(skb, skb1, len, pos);
}

/* Shifting from/to a cloned skb is a no-go.
*
* TODO: handle cloned skbs by using pskb_expand_head()
*/
static int skb_prepare_for_shift(struct sk_buff *skb)
{
return skb_cloned(skb);
}

/**
* skb_shift - shift paged data partially from one skb to another
* @tgt: buffer into which tail data gets added
* @skb: buffer from which the paged data comes
* @shiftlen: shift up to this many bytes
*
* Attempts to shift up to shiftlen worth of bytes, which may be less than
* the length of the skb, from @skb to @tgt. Returns the number of bytes
* shifted. It's up to the caller to free @skb if everything was shifted.
*
* If @tgt runs out of frags, the whole operation is aborted.
*
* @skb cannot contain anything other than paged data, while @tgt is
* allowed to have non-paged data as well.
*
* TODO: a full-sized shift could be optimized, but that would need a
* specialized skb freeing routine to handle frags without an up-to-date
* nr_frags.
*/
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
int from, to, merge, todo;
struct skb_frag_struct *fragfrom, *fragto;

BUG_ON(shiftlen > skb->len);
BUG_ON(skb_headlen(skb)); /* Would corrupt stream */

todo = shiftlen;
from = 0;
to = skb_shinfo(tgt)->nr_frags;
fragfrom = &skb_shinfo(skb)->frags[from];

/* The actual merge is delayed until the point where we know we can
 * commit everything, so that partial changes never need to be undone.
 */
if (!to ||
!skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
merge = -1;
} else {
merge = to - 1;

todo -= fragfrom->size;
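/* A negative todo below means the whole shift fits inside skb's
 * first fragment, so only the merged fragment's bounds need to move.
 */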
if (todo < 0) {
if (skb_prepare_for_shift(skb) ||
skb_prepare_for_shift(tgt))
return 0;

fragto = &skb_shinfo(tgt)->frags[merge];

fragto->size += shiftlen;
fragfrom->size -= shiftlen;
fragfrom->page_offset += shiftlen;

goto onlymerged;
}

from++;
}

/* Skip full, not-fitting skb to avoid expensive operations */
if ((shiftlen == skb->len) &&
(skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
return 0;

if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
return 0;

while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
if (to == MAX_SKB_FRAGS)
return 0;

fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[to];

if (todo >= fragfrom->size) {
*fragto = *fragfrom;
todo -= fragfrom->size;
from++;
to++;

} else {
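/* Partial take from this fragment: both skbs keep pointing into
 * the same page, so grab an extra page reference for tgt.
 */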
get_page(fragfrom->page);
fragto->page = fragfrom->page;
fragto->page_offset = fragfrom->page_offset;
fragto->size = todo;

fragfrom->page_offset += todo;
fragfrom->size -= todo;
todo = 0;

to++;
break;
}
}

/* Ready to "commit" this state change to tgt */
skb_shinfo(tgt)->nr_frags = to;

if (merge >= 0) {
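/* The coalesce check at the top guaranteed that skb's first frag
 * continues tgt's last one, so its bytes can simply be accounted
 * to tgt and the now-surplus page reference dropped.
 */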
fragfrom = &skb_shinfo(skb)->frags[0];
fragto = &skb_shinfo(tgt)->frags[merge];

fragto->size += fragfrom->size;
put_page(fragfrom->page);
}

/* Reposition in the original skb */
to = 0;
while (from < skb_shinfo(skb)->nr_frags)
skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
skb_shinfo(skb)->nr_frags = to;

/* Leftover bytes to shift with no source frags remaining would mean
 * skb->len disagreed with the fragment sizes; that must never happen.
 */
BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
/* Most likely the tgt won't ever need its checksum anymore; skb, on
 * the other hand, might need it again if it has to be retransmitted.
 */
tgt->ip_summed = CHECKSUM_PARTIAL;
skb->ip_summed = CHECKSUM_PARTIAL;

/* Yak, is it really working this way? Some helper please? */
skb->len -= shiftlen;
skb->data_len -= shiftlen;
skb->truesize -= shiftlen;
tgt->len += shiftlen;
tgt->data_len += shiftlen;
tgt->truesize += shiftlen;

return shiftlen;
}
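
The contract documented above (a full shift leaves an empty @skb that the caller must free, and, per the code, skb_shift() either shifts the full requested length or returns 0 without logically modifying either buffer) suggests a caller pattern along the following lines. This is a sketch only: collapse_into_prev() is an illustrative name, the real consumer of skb_shift() is TCP's SACK processing, and __skb_unlink()/kfree_skb() are existing kernel primitives.

    /* Try to merge all of @skb's paged data into @prev. @skb must carry
     * paged data only (skb_headlen(skb) == 0), as skb_shift() requires.
     */
    static void collapse_into_prev(struct sk_buff_head *list,
                                   struct sk_buff *prev, struct sk_buff *skb)
    {
        int shifted = skb_shift(prev, skb, skb->len);

        if (shifted == skb->len) {
            /* Everything moved; skb is now empty and ours to free. */
            __skb_unlink(skb, list);
            kfree_skb(skb);
        }
        /* A return of 0 means skb_shift() aborted without logically
         * modifying either skb, so there is nothing to undo.
         */
    }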

/**
* skb_prepare_seq_read - Prepare a sequential read of skb data
* @skb: the buffer to read