Commit

---
r: 256149
b: refs/heads/master
c: a6686f2
h: refs/heads/master
i:
  256147: 08b94f7
v: v3
Shirley Ma authored and David S. Miller committed Jul 7, 2011
1 parent e88dc9d commit 31724d2
Showing 3 changed files with 96 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 1cdebb423202e255366a321814fc6df079802a0d
+refs/heads/master: a6686f2f382b13f8a7253401a66690c3633b6a74
16 changes: 16 additions & 0 deletions trunk/include/linux/skbuff.h
@@ -187,6 +187,20 @@ enum {

/* ensure the originating sk reference is available on driver level */
SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

/* device driver supports TX zero-copy buffers */
SKBTX_DEV_ZEROCOPY = 1 << 4,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * the lower device; the skb's last reference should be 0 when this is called.
 * The desc field is used to track the userspace buffer index.
 */
struct ubuf_info {
void (*callback)(void *);
void *arg;
unsigned long desc;
};

/* This data is invariant across clones and lives at
@@ -211,6 +225,7 @@ struct skb_shared_info {
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
void * destructor_arg;

/* must be last field, see pskb_expand_head() */
skb_frag_t frags[MAX_SKB_FRAGS];
};
@@ -2265,5 +2280,6 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
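For context, a minimal driver-side sketch of how the new flag and struct are meant to be wired up: the sender fills a struct ubuf_info, hangs it off destructor_arg and sets SKBTX_DEV_ZEROCOPY, so that skb_release_data() fires the callback once the device has finished with the user pages. Only struct ubuf_info, SKBTX_DEV_ZEROCOPY, destructor_arg and skb_shinfo() come from this patch; everything prefixed my_ is a hypothetical name used for illustration.

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Hypothetical per-transmit context (not part of this patch). */
struct my_zc_ctx {
	struct ubuf_info uarg;
	unsigned long buf_index;	/* index of the userspace buffer */
};

/* Completion callback: skb_release_data() passes the ubuf_info pointer
 * once the lower device is done with the user pages. */
static void my_zc_complete(void *arg)
{
	struct ubuf_info *uarg = arg;
	struct my_zc_ctx *ctx = container_of(uarg, struct my_zc_ctx, uarg);

	/* here the sender would mark ctx->buf_index as reusable */
	(void)ctx;
}

/* Attach the context to an outgoing skb whose frags point at user pages. */
static void my_zc_attach(struct sk_buff *skb, struct my_zc_ctx *ctx)
{
	ctx->uarg.callback = my_zc_complete;
	ctx->uarg.arg = ctx;
	ctx->uarg.desc = ctx->buf_index;

	skb_shinfo(skb)->destructor_arg = &ctx->uarg;
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
}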
80 changes: 79 additions & 1 deletion trunk/net/core/skbuff.c
@@ -329,6 +329,18 @@ static void skb_release_data(struct sk_buff *skb)
put_page(skb_shinfo(skb)->frags[i].page);
}

/*
 * If the skb buffer came from userspace, we need to notify the caller
 * that the lower device's DMA has completed.
 */
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
struct ubuf_info *uarg;

uarg = skb_shinfo(skb)->destructor_arg;
if (uarg->callback)
uarg->callback(uarg);
}

if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);

@@ -481,6 +493,9 @@ bool skb_recycle_check(struct sk_buff *skb, int skb_size)
if (irqs_disabled())
return false;

if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
return false;

if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
return false;

@@ -596,6 +611,51 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
}
EXPORT_SYMBOL_GPL(skb_morph);

/* Copy the userspace buffers referenced by skb frags into kernel pages */
static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
int i;
int num_frags = skb_shinfo(skb)->nr_frags;
struct page *page, *head = NULL;
struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

for (i = 0; i < num_frags; i++) {
u8 *vaddr;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];

page = alloc_page(GFP_ATOMIC);
if (!page) {
while (head) {
struct page *next = (struct page *)head->private;
put_page(head);
head = next;
}
return -ENOMEM;
}
vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
memcpy(page_address(page),
vaddr + f->page_offset, f->size);
kunmap_skb_frag(vaddr);
page->private = (unsigned long)head;
head = page;
}

/* skb frags release userspace buffers */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
put_page(skb_shinfo(skb)->frags[i].page);

uarg->callback(uarg);

/* skb frags point to kernel buffers */
for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
skb_shinfo(skb)->frags[i - 1].page_offset = 0;
skb_shinfo(skb)->frags[i - 1].page = head;
head = (struct page *)head->private;
}
return 0;
}


/**
* skb_clone - duplicate an sk_buff
* @skb: buffer to clone
@@ -614,6 +674,11 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *n;

if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
return NULL;
}

n = skb + 1;
if (skb->fclone == SKB_FCLONE_ORIG &&
n->fclone == SKB_FCLONE_UNAVAILABLE) {
@@ -731,6 +796,12 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_shinfo(skb)->nr_frags) {
int i;

if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask)) {
kfree(n);
goto out;
}
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
get_page(skb_shinfo(n)->frags[i].page);
@@ -788,7 +859,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
fastpath = true;
else {
int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
}

@@ -819,6 +889,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (fastpath) {
kfree(skb->head);
} else {
/* copy this zero copy skb frags */
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
goto nofrags;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
get_page(skb_shinfo(skb)->frags[i].page);

@@ -853,6 +928,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
atomic_set(&skb_shinfo(skb)->dataref, 1);
return 0;

nofrags:
kfree(data);
nodata:
return -ENOMEM;
}
@@ -1354,6 +1431,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
}
start = end;
}

if (!len)
return 0;

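The call sites above (skb_clone(), pskb_copy(), pskb_expand_head()) all repeat the same guard before the frag pages can be shared or re-referenced. A hedged sketch of that idiom as a standalone helper, assuming it lives in net/core/skbuff.c next to the static skb_copy_ubufs() added above; the helper name is hypothetical and not part of this patch:

/* Hypothetical helper (not in this patch) showing the guard repeated at each
 * call site: a zero-copy skb gets private kernel copies of its frags, and the
 * completion callback fires, before the pages can be shared with a clone. */
static int my_orphan_zerocopy_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return skb_copy_ubufs(skb, gfp_mask);
	return 0;
}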
