Commit

---
r: 203401
b: refs/heads/master
c: acbc0f0
h: refs/heads/master
i:
  203399: 7e5fbe5
v: v3
Eran Liberty authored and David S. Miller committed Jul 7, 2010
1 parent 1ead1c3 commit 65404bd
Showing 2 changed files with 28 additions and 28 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f29a3d040727a80c3307a2bea057206be049c305
+refs/heads/master: acbc0f039ff4b93da737c91937b7c70018ded39f
54 changes: 27 additions & 27 deletions trunk/drivers/net/gianfar.c
@@ -2420,6 +2420,15 @@ static void gfar_timeout(struct net_device *dev)
 	schedule_work(&priv->reset_task);
 }
 
+static void gfar_align_skb(struct sk_buff *skb)
+{
+	/* We need the data buffer to be aligned properly. We will reserve
+	 * as many bytes as needed to align the data properly
+	 */
+	skb_reserve(skb, RXBUF_ALIGNMENT -
+		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+}
+
 /* Interrupt Handler for Transmit complete */
 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
@@ -2504,9 +2513,10 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		 */
 		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
 				skb_recycle_check(skb, priv->rx_buffer_size +
-					RXBUF_ALIGNMENT))
+					RXBUF_ALIGNMENT)) {
+			gfar_align_skb(skb);
 			__skb_queue_head(&priv->rx_recycle, skb);
-		else
+		} else
 			dev_kfree_skb_any(skb);
 
 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2569,29 +2579,28 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 	gfar_init_rxbdp(rx_queue, bdp, buf);
 }
 
-
-struct sk_buff * gfar_new_skb(struct net_device *dev)
+static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
 {
-	unsigned int alignamount;
 	struct gfar_private *priv = netdev_priv(dev);
 	struct sk_buff *skb = NULL;
 
-	skb = __skb_dequeue(&priv->rx_recycle);
-	if (!skb)
-		skb = netdev_alloc_skb(dev,
-				priv->rx_buffer_size + RXBUF_ALIGNMENT);
-
+	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 	if (!skb)
 		return NULL;
 
-	alignamount = RXBUF_ALIGNMENT -
-		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
+	gfar_align_skb(skb);
 
-	/* We need the data buffer to be aligned properly. We will reserve
-	 * as many bytes as needed to align the data properly
-	 */
-	skb_reserve(skb, alignamount);
-	GFAR_CB(skb)->alignamount = alignamount;
+	return skb;
+}
+
+struct sk_buff * gfar_new_skb(struct net_device *dev)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+	struct sk_buff *skb = NULL;
+
+	skb = __skb_dequeue(&priv->rx_recycle);
+	if (!skb)
+		skb = gfar_alloc_skb(dev);
 
 	return skb;
 }
@@ -2744,17 +2753,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 
 			if (unlikely(!newskb))
 				newskb = skb;
-			else if (skb) {
-				/*
-				 * We need to un-reserve() the skb to what it
-				 * was before gfar_new_skb() re-aligned
-				 * it to an RXBUF_ALIGNMENT boundary
-				 * before we put the skb back on the
-				 * recycle list.
-				 */
-				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
+			else if (skb)
 				__skb_queue_head(&priv->rx_recycle, skb);
-			}
 			} else {
 				/* Increment the number of packets */
 				rx_queue->stats.rx_packets++;
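For reference, the new gfar_align_skb() helper and the inline code it replaces both compute the padding to reserve as RXBUF_ALIGNMENT - (data & (RXBUF_ALIGNMENT - 1)). A minimal standalone sketch of that arithmetic follows; the RXBUF_ALIGNMENT value of 64 and the sample addresses are assumptions for illustration only, not part of the patch.

/* Illustrative sketch only, not part of the commit: shows how the masking
 * expression used by gfar_align_skb() computes the number of bytes to
 * skb_reserve() so that the data pointer lands on an RXBUF_ALIGNMENT
 * boundary.  The alignment value and the sample addresses are assumed.
 */
#include <stdio.h>

#define RXBUF_ALIGNMENT 64	/* assumed; the driver defines its own value */

static unsigned long align_slack(unsigned long data)
{
	/* Same expression as the patch: an already aligned address still
	 * reserves a full RXBUF_ALIGNMENT bytes.
	 */
	return RXBUF_ALIGNMENT - (data & (RXBUF_ALIGNMENT - 1));
}

int main(void)
{
	unsigned long samples[] = { 0x1000, 0x1001, 0x103f };
	unsigned long i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("data=0x%lx reserve=%lu aligned=0x%lx\n",
		       samples[i], align_slack(samples[i]),
		       samples[i] + align_slack(samples[i]));
	return 0;
}

With these inputs every pointer ends up at 0x1040, the next 64-byte boundary; an already aligned input still has a full 64 bytes reserved, matching the driver's expression.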
