Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 170662
b: refs/heads/master
c: 826aa4a
h: refs/heads/master
v: v3
  • Loading branch information
Anton Vorontsov authored and David S. Miller committed Oct 13, 2009
1 parent 9063d1b commit d28fbeb
Show file tree
Hide file tree
Showing 2 changed files with 177 additions and 159 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 14231176b0dc358f8693f25b62017d222dd995e6
refs/heads/master: 826aa4a05669a46e435f65db901186e42bb43d8d
334 changes: 176 additions & 158 deletions trunk/drivers/net/gianfar.c
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,176 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/*
 * gfar_alloc_skb_resources - set up the TX/RX descriptor rings and skb arrays
 * @ndev: network device being brought up
 *
 * Allocates a single DMA-coherent region that holds the TX buffer-descriptor
 * ring immediately followed by the RX ring, programs the two ring base
 * addresses into the controller, allocates the sk_buff pointer arrays,
 * clears the TX descriptors, and pre-populates every RX descriptor with a
 * freshly allocated receive skb.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure; on failure all
 * partially acquired resources are released via free_skb_resources().
 */
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr = 0;
	void *vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar __iomem *regs = priv->regs;

	/* Allocate memory for the buffer descriptors: one coherent block
	 * sized for the TX ring followed by the RX ring */
	vaddr = dma_alloc_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
				   sizeof(*rxbdp) * priv->rx_ring_size,
				   &addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof(*txbdp) * priv->tx_ring_size;
	vaddr = vaddr + sizeof(*txbdp) * priv->tx_ring_size;
	priv->rx_bd_base = vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings: one pointer slot per descriptor */
	priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
				  priv->tx_ring_size, GFP_KERNEL);
	if (!priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate tx_skbuff\n",
			       ndev->name);
		goto cleanup;
	}

	/* kmalloc() does not zero, so clear the slots explicitly */
	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
				  priv->rx_ring_size, GFP_KERNEL);
	if (!priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate rx_skbuff\n",
			       ndev->name);
		goto cleanup;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure: rings start
	 * empty with all cursors at the base descriptor */
	priv->num_txbdfree = priv->tx_ring_size;
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring: no buffers attached yet */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->lstatus = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	/* Fill the RX ring: each descriptor gets a receive buffer now so
	 * the controller can DMA packets as soon as it is started */
	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb;

		skb = gfar_new_skb(ndev);
		if (!skb) {
			pr_err("%s: Can't allocate RX buffers\n", ndev->name);
			goto cleanup;
		}

		priv->rx_skbuff[i] = skb;

		gfar_new_rxbdp(ndev, rxbdp, skb);

		rxbdp++;
	}

	return 0;

cleanup:
	/* NOTE(review): relies on free_skb_resources() tolerating a
	 * partially initialized state (e.g. rx_skbuff still NULL) */
	free_skb_resources(priv);
	return -ENOMEM;
}

/*
 * gfar_init_mac - program the MAC-level control registers from driver state
 * @ndev: network device whose controller is being configured
 *
 * Writes the interrupt-coalescing, receive-control (checksumming, hash
 * filtering, padding, VLAN extraction), transmit-control (VLAN insertion,
 * checksum generation), stashing-attribute and TX FIFO threshold registers
 * based on the settings currently held in gfar_private.  Pure register
 * setup; no memory is allocated and no error can be returned.
 */
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* Configure the coalescing support: clear first, then apply the
	 * stored value only if coalescing is enabled */
	gfar_write(&regs->txic, 0);
	if (priv->txcoalescing)
		gfar_write(&regs->txic, priv->txic);

	gfar_write(&regs->rxic, 0);
	if (priv->rxcoalescing)
		gfar_write(&regs->rxic, priv->rxic);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		/* Extended hashing repurposes the exact-match registers,
		 * so clear them before enabling exact-match mode */
		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* TX FIFO watermarks controlling when transmission starts/stalls */
	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
Expand Down Expand Up @@ -927,106 +1097,17 @@ void gfar_start(struct net_device *dev)
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
dma_addr_t addr = 0;
void *vaddr;
int i;
struct gfar_private *priv = netdev_priv(ndev);
struct device *dev = &priv->ofdev->dev;
struct gfar __iomem *regs = priv->regs;
int err;
u32 rctrl = 0;
u32 tctrl = 0;
u32 attrs = 0;

gfar_write(&regs->imask, IMASK_INIT_CLEAR);

/* Allocate memory for the buffer descriptors */
vaddr = dma_alloc_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
sizeof(*rxbdp) * priv->rx_ring_size,
&addr, GFP_KERNEL);
if (!vaddr) {
if (netif_msg_ifup(priv))
pr_err("%s: Could not allocate buffer descriptors!\n",
ndev->name);
return -ENOMEM;
}

priv->tx_bd_base = vaddr;

/* enet DMA only understands physical addresses */
gfar_write(&regs->tbase0, addr);

/* Start the rx descriptor ring where the tx ring leaves off */
addr = addr + sizeof(*txbdp) * priv->tx_ring_size;
vaddr = vaddr + sizeof(*txbdp) * priv->tx_ring_size;
priv->rx_bd_base = vaddr;
gfar_write(&regs->rbase0, addr);

/* Setup the skbuff rings */
priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
priv->tx_ring_size, GFP_KERNEL);
if (!priv->tx_skbuff) {
if (netif_msg_ifup(priv))
pr_err("%s: Could not allocate tx_skbuff\n",
ndev->name);
err = -ENOMEM;
goto tx_skb_fail;
}

for (i = 0; i < priv->tx_ring_size; i++)
priv->tx_skbuff[i] = NULL;

priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
priv->rx_ring_size, GFP_KERNEL);
if (!priv->rx_skbuff) {
if (netif_msg_ifup(priv))
pr_err("%s: Could not allocate rx_skbuff\n",
ndev->name);
err = -ENOMEM;
goto rx_skb_fail;
}

for (i = 0; i < priv->rx_ring_size; i++)
priv->rx_skbuff[i] = NULL;

/* Initialize some variables in our dev structure */
priv->num_txbdfree = priv->tx_ring_size;
priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
priv->cur_rx = priv->rx_bd_base;
priv->skb_curtx = priv->skb_dirtytx = 0;
priv->skb_currx = 0;

/* Initialize Transmit Descriptor Ring */
txbdp = priv->tx_bd_base;
for (i = 0; i < priv->tx_ring_size; i++) {
txbdp->lstatus = 0;
txbdp->bufPtr = 0;
txbdp++;
}

/* Set the last descriptor in the ring to indicate wrap */
txbdp--;
txbdp->status |= TXBD_WRAP;

rxbdp = priv->rx_bd_base;
for (i = 0; i < priv->rx_ring_size; i++) {
struct sk_buff *skb;

skb = gfar_new_skb(ndev);
if (!skb) {
pr_err("%s: Can't allocate RX buffers\n", ndev->name);
err = -ENOMEM;
goto err_rxalloc_fail;
}

priv->rx_skbuff[i] = skb;

gfar_new_rxbdp(ndev, rxbdp, skb);
err = gfar_alloc_skb_resources(ndev);
if (err)
return err;

rxbdp++;
}
gfar_init_mac(ndev);

/* If the device has multiple interrupts, register for
* them. Otherwise, only register for the one */
Expand Down Expand Up @@ -1070,81 +1151,18 @@ int startup_gfar(struct net_device *ndev)
}
}

phy_start(priv->phydev);

/* Configure the coalescing support */
gfar_write(&regs->txic, 0);
if (priv->txcoalescing)
gfar_write(&regs->txic, priv->txic);

gfar_write(&regs->rxic, 0);
if (priv->rxcoalescing)
gfar_write(&regs->rxic, priv->rxic);

if (priv->rx_csum_enable)
rctrl |= RCTRL_CHECKSUMMING;

if (priv->extended_hash) {
rctrl |= RCTRL_EXTHASH;

gfar_clear_exact_match(ndev);
rctrl |= RCTRL_EMEN;
}

if (priv->padding) {
rctrl &= ~RCTRL_PAL_MASK;
rctrl |= RCTRL_PADDING(priv->padding);
}

/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
tctrl |= TCTRL_VLINS;
}

/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);

if (ndev->features & NETIF_F_IP_CSUM)
tctrl |= TCTRL_INIT_CSUM;

gfar_write(&regs->tctrl, tctrl);

/* Set the extraction length and index */
attrs = ATTRELI_EL(priv->rx_stash_size) |
ATTRELI_EI(priv->rx_stash_index);

gfar_write(&regs->attreli, attrs);

/* Start with defaults, and add stashing or locking
* depending on the approprate variables */
attrs = ATTR_INIT_SETTINGS;

if (priv->bd_stash_en)
attrs |= ATTR_BDSTASH;

if (priv->rx_stash_size != 0)
attrs |= ATTR_BUFSTASH;

gfar_write(&regs->attr, attrs);

gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

/* Start the controller */
gfar_start(ndev);

phy_start(priv->phydev);

return 0;

rx_irq_fail:
free_irq(priv->interruptTransmit, ndev);
tx_irq_fail:
free_irq(priv->interruptError, ndev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
tx_skb_fail:
free_skb_resources(priv);
return err;
}
Expand Down

0 comments on commit d28fbeb

Please sign in to comment.