Commit

---
r: 194569
b: refs/heads/master
c: 59d7198
h: refs/heads/master
i:
  194567: 78dc417
v: v3
Alexander Duyck authored and David S. Miller committed Apr 27, 2010
1 parent 7090e04 commit 16514b0
Showing 4 changed files with 62 additions and 59 deletions.
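This commit switches the igb driver from the legacy pci_* DMA helpers to the generic DMA API: each ring now stores a struct device * (taken from &pdev->dev) instead of a struct pci_dev *, pci_map_single/pci_map_page/pci_alloc_consistent become dma_map_single/dma_map_page/dma_alloc_coherent (likewise for the unmap and free calls), PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE, and mapping failures are checked with dma_mapping_error(). The sketch below is illustrative only, not code from this commit; the demo_* helpers are hypothetical and simply show a legacy call next to its generic replacement.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Illustrative sketch of the conversion applied throughout this commit.
 * The legacy helpers took a pci_dev and a PCI_DMA_* direction; the generic
 * API takes the underlying struct device and a DMA_* direction.
 */
static dma_addr_t demo_map_rx_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
        struct device *dev = &pdev->dev;        /* what igb_ring->dev now holds */
        dma_addr_t dma;

        /* old: dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE); */
        dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma))        /* old: pci_dma_mapping_error() */
                return 0;                       /* treat 0 as "not mapped", as igb does */

        return dma;
}

static void demo_unmap_rx_buffer(struct pci_dev *pdev, dma_addr_t dma, size_t len)
{
        /* old: pci_unmap_single(pdev, dma, len, PCI_DMA_FROMDEVICE); */
        dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);
}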
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 0be3f55f8aa5f9d1882255128bd79d4885b0cbe4
refs/heads/master: 59d71989352deb71bc0bba83802820de765f6e25
2 changes: 1 addition & 1 deletion trunk/drivers/net/igb/igb.h
@@ -186,7 +186,7 @@ struct igb_q_vector {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
struct pci_dev *pdev; /* pci device for dma mapping */
struct device *dev; /* device pointer for dma mapping */
dma_addr_t dma; /* phys address of the ring */
void *desc; /* descriptor ring memory */
unsigned int size; /* length of desc. ring in bytes */
8 changes: 4 additions & 4 deletions trunk/drivers/net/igb/igb_ethtool.c
@@ -1394,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)

/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
tx_ring->pdev = adapter->pdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;

@@ -1408,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)

/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
rx_ring->pdev = adapter->pdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1604,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
buffer_info = &rx_ring->buffer_info[rx_ntc];

/* unmap rx buffer, will be remapped by alloc_rx_buffers */
pci_unmap_single(rx_ring->pdev,
dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
buffer_info->dma = 0;

/* verify contents of skb */
109 changes: 56 additions & 53 deletions trunk/drivers/net/igb/igb_main.c
@@ -350,7 +350,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
@@ -364,7 +364,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -1398,15 +1398,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
return err;

pci_using_dac = 0;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -2080,7 +2080,7 @@ static int igb_close(struct net_device *netdev)
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
struct pci_dev *pdev = tx_ring->pdev;
struct device *dev = tx_ring->dev;
int size;

size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2093,9 +2093,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);

tx_ring->desc = pci_alloc_consistent(pdev,
tx_ring->size,
&tx_ring->dma);
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
GFP_KERNEL);

if (!tx_ring->desc)
goto err;
@@ -2106,7 +2107,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)

err:
vfree(tx_ring->buffer_info);
dev_err(&pdev->dev,
dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
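igb_setup_tx_resources() (and igb_setup_rx_resources() below) now allocates the descriptor ring with dma_alloc_coherent(), which takes the struct device, an explicit GFP_KERNEL, and fills in both a CPU pointer and the bus address; the teardown paths further down pair it with dma_free_coherent(). A minimal sketch of that allocate/free pairing, using a hypothetical demo_ring structure rather than struct igb_ring:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

struct demo_ring {
        struct device *dev;     /* &pdev->dev, as stored in struct igb_ring */
        void *desc;             /* CPU address of the descriptor memory */
        dma_addr_t dma;         /* bus address programmed into the hardware */
        unsigned int size;      /* ring size in bytes */
};

static int demo_ring_alloc(struct demo_ring *ring, unsigned int bytes)
{
        ring->size = ALIGN(bytes, 4096);        /* igb aligns rings to 4 KiB */
        ring->desc = dma_alloc_coherent(ring->dev, ring->size,
                                        &ring->dma, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;
        return 0;
}

static void demo_ring_free(struct demo_ring *ring)
{
        if (!ring->desc)
                return;
        dma_free_coherent(ring->dev, ring->size, ring->desc, ring->dma);
        ring->desc = NULL;
}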
@@ -2230,7 +2231,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct pci_dev *pdev = rx_ring->pdev;
struct device *dev = rx_ring->dev;
int size, desc_len;

size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2245,8 +2246,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);

rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
&rx_ring->dma);
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
GFP_KERNEL);

if (!rx_ring->desc)
goto err;
@@ -2259,8 +2262,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
dev_err(&pdev->dev, "Unable to allocate memory for "
"the receive descriptor ring\n");
dev_err(dev, "Unable to allocate memory for the receive descriptor"
" ring\n");
return -ENOMEM;
}

@@ -2636,8 +2639,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
if (!tx_ring->desc)
return;

pci_free_consistent(tx_ring->pdev, tx_ring->size,
tx_ring->desc, tx_ring->dma);
dma_free_coherent(tx_ring->dev, tx_ring->size,
tx_ring->desc, tx_ring->dma);

tx_ring->desc = NULL;
}
@@ -2661,15 +2664,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
pci_unmap_page(tx_ring->pdev,
dma_unmap_page(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
else
pci_unmap_single(tx_ring->pdev,
dma_unmap_single(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -2740,8 +2743,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->desc)
return;

pci_free_consistent(rx_ring->pdev, rx_ring->size,
rx_ring->desc, rx_ring->dma);
dma_free_coherent(rx_ring->dev, rx_ring->size,
rx_ring->desc, rx_ring->dma);

rx_ring->desc = NULL;
}
@@ -2777,10 +2780,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
pci_unmap_single(rx_ring->pdev,
dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
buffer_info->dma = 0;
}

@@ -2789,10 +2792,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
buffer_info->skb = NULL;
}
if (buffer_info->page_dma) {
pci_unmap_page(rx_ring->pdev,
dma_unmap_page(rx_ring->dev,
buffer_info->page_dma,
PAGE_SIZE / 2,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
}
if (buffer_info->page) {
@@ -3480,7 +3483,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
struct e1000_adv_tx_context_desc *context_desc;
struct pci_dev *pdev = tx_ring->pdev;
struct device *dev = tx_ring->dev;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
unsigned int i;
@@ -3531,7 +3534,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
break;
default:
if (unlikely(net_ratelimit()))
dev_warn(&pdev->dev,
dev_warn(dev,
"partial checksum but proto=%x!\n",
skb->protocol);
break;
@@ -3565,7 +3568,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned int first)
{
struct igb_buffer *buffer_info;
struct pci_dev *pdev = tx_ring->pdev;
struct device *dev = tx_ring->dev;
unsigned int len = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
@@ -3578,9 +3581,9 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
buffer_info->dma = dma_map_single(dev, skb->data, len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;

for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
@@ -3600,12 +3603,12 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
buffer_info->dma = pci_map_page(pdev,
buffer_info->dma = dma_map_page(dev,
frag->page,
frag->page_offset,
len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
DMA_TO_DEVICE);
if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;

}
@@ -3617,7 +3620,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
return ++count;

dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
dev_err(dev, "TX DMA map failed\n");

/* clear timestamp and dma mappings for failed buffer_info mapping */
buffer_info->dma = 0;
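In igb_tx_map_adv() the skb head is mapped with dma_map_single(), each page fragment with dma_map_page(), every mapping is checked with dma_mapping_error(), and a failure jumps to the dma_error label that unwinds the earlier mappings. A reduced sketch of that head-plus-fragment pattern follows; demo_map_skb() is hypothetical, handles only the first fragment, and assumes the 2.6.34-era skb_frag_t fields (page, page_offset, size) used elsewhere in this diff.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int demo_map_skb(struct device *dev, struct sk_buff *skb,
                        dma_addr_t *head_dma, dma_addr_t *frag_dma)
{
        unsigned int head_len = skb_headlen(skb);
        skb_frag_t *frag;

        *head_dma = dma_map_single(dev, skb->data, head_len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *head_dma))
                return -ENOMEM;

        if (!skb_shinfo(skb)->nr_frags)
                return 0;

        frag = &skb_shinfo(skb)->frags[0];
        *frag_dma = dma_map_page(dev, frag->page, frag->page_offset,
                                 frag->size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *frag_dma)) {
                /* unwind the head mapping before reporting the error */
                dma_unmap_single(dev, *head_dma, head_len, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        return 0;
}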
@@ -5059,7 +5062,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

/* detected Tx unit hang */
dev_err(&tx_ring->pdev->dev,
dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH <%x>\n"
@@ -5138,7 +5141,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;

dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}

static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
@@ -5193,7 +5196,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
{
struct igb_ring *rx_ring = q_vector->rx_ring;
struct net_device *netdev = rx_ring->netdev;
struct pci_dev *pdev = rx_ring->pdev;
struct device *dev = rx_ring->dev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
struct igb_buffer *buffer_info , *next_buffer;
struct sk_buff *skb;
@@ -5233,9 +5236,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
cleaned_count++;

if (buffer_info->dma) {
pci_unmap_single(pdev, buffer_info->dma,
dma_unmap_single(dev, buffer_info->dma,
rx_ring->rx_buffer_len,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
buffer_info->dma = 0;
if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
@@ -5245,8 +5248,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
}

if (length) {
pci_unmap_page(pdev, buffer_info->page_dma,
PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
dma_unmap_page(dev, buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
buffer_info->page_dma = 0;

skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -5354,12 +5357,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
pci_map_page(rx_ring->pdev, buffer_info->page,
dma_map_page(rx_ring->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(rx_ring->pdev,
buffer_info->page_dma)) {
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
buffer_info->page_dma)) {
buffer_info->page_dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5377,12 +5380,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->skb = skb;
}
if (!buffer_info->dma) {
buffer_info->dma = pci_map_single(rx_ring->pdev,
buffer_info->dma = dma_map_single(rx_ring->dev,
skb->data,
bufsz,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(rx_ring->pdev,
buffer_info->dma)) {
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
buffer_info->dma)) {
buffer_info->dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
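igb_alloc_rx_buffers_adv() maps each receive buffer in two pieces: the skb data area with dma_map_single() and half of a page with dma_map_page(), toggling page_offset between the two halves so a page can be reused; a failed mapping is zeroed, counted in rx_stats.alloc_failed, and the refill loop bails out. A minimal sketch of just the half-page step, with a hypothetical demo_rx_buffer in place of struct igb_buffer:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct demo_rx_buffer {
        struct page *page;
        unsigned int page_offset;       /* alternates between 0 and PAGE_SIZE / 2 */
        dma_addr_t page_dma;
};

static int demo_map_half_page(struct device *dev, struct demo_rx_buffer *buf)
{
        buf->page_offset ^= PAGE_SIZE / 2;      /* use the other half of the page */
        buf->page_dma = dma_map_page(dev, buf->page, buf->page_offset,
                                     PAGE_SIZE / 2, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, buf->page_dma)) {
                buf->page_dma = 0;
                return -ENOMEM;         /* caller counts this as an alloc failure */
        }
        return 0;
}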