diff --git a/[refs] b/[refs]
index 267791802c5a..e4b38558cb2b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e94bd23f67c87011f012f26ca0af3fcf6878eeac
+refs/heads/master: 074b8ba3863dd168befdba6c9115e990349a6755
diff --git a/trunk/Documentation/networking/netdevices.txt b/trunk/Documentation/networking/netdevices.txt
index ce1361f95243..847cedb238f6 100644
--- a/trunk/Documentation/networking/netdevices.txt
+++ b/trunk/Documentation/networking/netdevices.txt
@@ -49,7 +49,7 @@ dev->hard_start_xmit:
 	for this and return -1 when the spin lock fails.
 	The locking there should also properly protect against
 	set_multicast_list
-	Context: Process with BHs disabled or BH (timer).
+	Context: BHs disabled
 	Notes: netif_queue_stopped() is guaranteed false
 	Interrupts must be enabled when calling hard_start_xmit.
 	(Interrupts must also be enabled when enabling the BH handler.)
diff --git a/trunk/drivers/ata/sata_nv.c b/trunk/drivers/ata/sata_nv.c
index 4cea3ef75226..82a615db4d50 100644
--- a/trunk/drivers/ata/sata_nv.c
+++ b/trunk/drivers/ata/sata_nv.c
@@ -288,12 +288,6 @@ static const struct pci_device_id nv_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
-	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
-		PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
 	{ } /* terminate list */
 };
 
diff --git a/trunk/drivers/net/e1000/e1000.h b/trunk/drivers/net/e1000/e1000.h
index 16a6edfeba41..a9ea67e75c1b 100644
--- a/trunk/drivers/net/e1000/e1000.h
+++ b/trunk/drivers/net/e1000/e1000.h
@@ -333,9 +333,11 @@ struct e1000_adapter {
 	struct e1000_tx_ring test_tx_ring;
 	struct e1000_rx_ring test_rx_ring;
 
+	int msg_enable;
+#ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
-
+#endif
 	/* to not mess up cache alignment, always add to the bottom */
 	boolean_t tso_force;
 	boolean_t smart_power_down;	/* phy smart power down */
diff --git a/trunk/drivers/net/e1000/e1000_main.c b/trunk/drivers/net/e1000/e1000_main.c
index 49be393e1c1d..637ae8f68791 100644
--- a/trunk/drivers/net/e1000/e1000_main.c
+++ b/trunk/drivers/net/e1000/e1000_main.c
@@ -158,7 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
 static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
@@ -298,26 +300,31 @@ module_exit(e1000_exit_module);
 static int e1000_request_irq(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	void (*handler) = &e1000_intr;
-	int irq_flags = IRQF_SHARED;
-	int err;
+	int flags, err = 0;
 
+	flags = IRQF_SHARED;
+#ifdef CONFIG_PCI_MSI
 	if (adapter->hw.mac_type >= e1000_82571) {
-		adapter->have_msi = !pci_enable_msi(adapter->pdev);
-		if (adapter->have_msi) {
-			handler = &e1000_intr_msi;
-			irq_flags = 0;
+		adapter->have_msi = TRUE;
+		if ((err = pci_enable_msi(adapter->pdev))) {
+			DPRINTK(PROBE, ERR,
+			 "Unable to allocate MSI interrupt Error: %d\n", err);
+			adapter->have_msi = FALSE;
 		}
 	}
-
-	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
-	                  netdev);
-	if (err) {
-		if (adapter->have_msi)
-			pci_disable_msi(adapter->pdev);
+	if (adapter->have_msi) {
+		flags &= ~IRQF_SHARED;
+		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+				  netdev->name, netdev);
+		if (err)
+			DPRINTK(PROBE, ERR,
+			       "Unable to allocate interrupt Error: %d\n", err);
+	} else
+#endif
+	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
+			       netdev->name, netdev)))
 		DPRINTK(PROBE, ERR,
 		        "Unable to allocate interrupt Error: %d\n", err);
-	}
 
 	return err;
 }
@@ -328,8 +335,10 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
 
 	free_irq(adapter->pdev->irq, netdev);
 
+#ifdef CONFIG_PCI_MSI
 	if (adapter->have_msi)
 		pci_disable_msi(adapter->pdev);
+#endif
 }
 
 /**
@@ -3735,6 +3744,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+#ifdef CONFIG_PCI_MSI
 /**
  * e1000_intr_msi - Interrupt Handler
@@ -3800,6 +3810,7 @@ e1000_intr_msi(int irq, void *data)
 	return IRQ_HANDLED;
 }
+#endif
 
 /**
  * e1000_intr - Interrupt Handler
diff --git a/trunk/drivers/net/gianfar.c b/trunk/drivers/net/gianfar.c
index f5b3cba23fc5..b666a0cc0642 100644
--- a/trunk/drivers/net/gianfar.c
+++ b/trunk/drivers/net/gianfar.c
@@ -1025,15 +1025,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	/* The powerpc-specific eieio() is used, as wmb() has too strong
-	 * semantics (it requires synchronization between cacheable and
-	 * uncacheable mappings, which eieio doesn't provide and which we
-	 * don't need), thus requiring a more expensive sync instruction. At
-	 * some point, the set of architecture-independent barrier functions
-	 * should be expanded to include weaker barriers.
-	 */
-
-	eieio();
 	txbdp->status = status;
 
 	/* If this was the last BD in the ring, the next one */
@@ -1310,7 +1301,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
 	bdp->length = 0;
 
 	/* Mark the buffer empty */
-	eieio();
 	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
 
 	return skb;
@@ -1494,7 +1484,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	bdp = priv->cur_rx;
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
-		rmb();
 		skb = priv->rx_skbuff[priv->skb_currx];
 
 		if (!(bdp->status &
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_core.c b/trunk/drivers/net/ibm_emac/ibm_emac_core.c
index f752e5fc65ba..50035ebd4f52 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_core.c
@@ -926,7 +926,7 @@ static int emac_link_differs(struct ocp_enet_private *dev)
 	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
 	int speed, pause, asym_pause;
 
-	if (r & EMAC_MR1_MF_1000)
+	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
 		speed = SPEED_1000;
 	else if (r & EMAC_MR1_MF_100)
 		speed = SPEED_100;
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_mal.c b/trunk/drivers/net/ibm_emac/ibm_emac_mal.c
index cabd9846a5ee..6c0f071e4052 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -59,7 +59,8 @@ int __init mal_register_commac(struct ibm_ocp_mal *mal,
 	return 0;
 }
 
-void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
+				  struct mal_commac *commac)
 {
 	unsigned long flags;
 	local_irq_save(flags);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_mal.h b/trunk/drivers/net/ibm_emac/ibm_emac_mal.h
index 64bc338acc6c..407d2acbf7c7 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -223,7 +223,8 @@ void mal_exit(void) __exit;
 
 int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
     __init;
-void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac);
+void mal_unregister_commac(struct ibm_ocp_mal *mal,
+			   struct mal_commac *commac) __exit;
 int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size);
 
 /* Returns BD ring offset for a particular channel
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_phy.c b/trunk/drivers/net/ibm_emac/ibm_emac_phy.c
index e57862b34cae..9074f76ee2bf 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -22,7 +22,6 @@
 
 #include 
 
-#include "ibm_emac_core.h"
 #include "ibm_emac_phy.h"
 
 static inline int phy_read(struct mii_phy *phy, int reg)
@@ -35,38 +34,10 @@ static inline void phy_write(struct mii_phy *phy, int reg, int val)
 	phy->mdio_write(phy->dev, phy->address, reg, val);
 }
 
-/*
- * polls MII_BMCR until BMCR_RESET bit clears or operation times out.
- *
- * returns:
- *	>= 0 => success, value in BMCR returned to caller
- *	-EBUSY => failure, RESET bit never cleared
- *	otherwise => failure, lower level PHY read failed
- */
-static int mii_spin_reset_complete(struct mii_phy *phy)
-{
-	int val;
-	int limit = 10000;
-
-	while (limit--) {
-		val = phy_read(phy, MII_BMCR);
-		if (val >= 0 && !(val & BMCR_RESET))
-			return val; /* success */
-		udelay(10);
-	}
-	if (val & BMCR_RESET)
-		val = -EBUSY;
-
-	if (net_ratelimit())
-		printk(KERN_ERR "emac%d: PHY reset timeout (%d)\n",
-		       ((struct ocp_enet_private *)phy->dev->priv)->def->index,
-		       val);
-	return val;
-}
-
 int mii_reset_phy(struct mii_phy *phy)
 {
 	int val;
+	int limit = 10000;
 
 	val = phy_read(phy, MII_BMCR);
 	val &= ~BMCR_ISOLATE;
@@ -75,11 +46,16 @@ int mii_reset_phy(struct mii_phy *phy)
 
 	udelay(300);
 
-	val = mii_spin_reset_complete(phy);
-	if (val >= 0 && (val & BMCR_ISOLATE))
+	while (limit--) {
+		val = phy_read(phy, MII_BMCR);
+		if (val >= 0 && (val & BMCR_RESET) == 0)
+			break;
+		udelay(10);
+	}
+	if ((val & BMCR_ISOLATE) && limit > 0)
 		phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
 
-	return val < 0;
+	return limit <= 0;
 }
 
 static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
@@ -126,14 +102,8 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
 	}
 
 	/* Start/Restart aneg */
-	/* on some PHYs (e.g. National DP83843) a write to MII_ADVERTISE
-	 * causes BMCR_RESET to be set on the next read of MII_BMCR, which
-	 * if not checked for causes the PHY to be reset below */
-	ctl = mii_spin_reset_complete(phy);
-	if (ctl < 0)
-		return ctl;
-
-	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
+	ctl = phy_read(phy, MII_BMCR);
+	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
 	phy_write(phy, MII_BMCR, ctl);
 
 	return 0;
@@ -148,13 +118,13 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
 	phy->duplex = fd;
 	phy->pause = phy->asym_pause = 0;
 
-	/* First reset the PHY */
-	mii_reset_phy(phy);
-
 	ctl = phy_read(phy, MII_BMCR);
 	if (ctl < 0)
 		return ctl;
-	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE | BMCR_SPEED1000);
+	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
+
+	/* First reset the PHY */
+	phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
 
 	/* Select speed & duplex */
 	switch (speed) {
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c
index 9dbb5e5936c3..53d281cb9a16 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c
@@ -162,7 +162,7 @@ void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
 	out_be32(&dev->base->ssr, ssr);
 }
 
-void __rgmii_fini(struct ocp_device *ocpdev, int input)
+void __exit __rgmii_fini(struct ocp_device *ocpdev, int input)
 {
 	struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
 	BUG_ON(!dev || dev->users == 0);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h
index 971e45815c6c..117ea486c2ca 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -37,7 +37,7 @@ struct ibm_ocp_rgmii {
 
 #ifdef CONFIG_IBM_EMAC_RGMII
 int rgmii_attach(void *emac) __init;
-void __rgmii_fini(struct ocp_device *ocpdev, int input);
+void __rgmii_fini(struct ocp_device *ocpdev, int input) __exit;
 static inline void rgmii_fini(struct ocp_device *ocpdev, int input)
 {
 	if (ocpdev)
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_tah.c b/trunk/drivers/net/ibm_emac/ibm_emac_tah.c
index 3c2d5ba522a1..e287b451bb44 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_tah.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_tah.c
@@ -63,7 +63,7 @@ int __init tah_attach(void *emac)
 	return 0;
 }
 
-void __tah_fini(struct ocp_device *ocpdev)
+void __exit __tah_fini(struct ocp_device *ocpdev)
 {
 	struct tah_regs *p = ocp_get_drvdata(ocpdev);
 	BUG_ON(!p);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_tah.h b/trunk/drivers/net/ibm_emac/ibm_emac_tah.h
index ccf64915e1e4..38153945a240 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_tah.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -55,7 +55,7 @@ struct tah_regs {
 
 #ifdef CONFIG_IBM_EMAC_TAH
 int tah_attach(void *emac) __init;
-void __tah_fini(struct ocp_device *ocpdev);
+void __tah_fini(struct ocp_device *ocpdev) __exit;
 static inline void tah_fini(struct ocp_device *ocpdev)
 {
 	if (ocpdev)
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c
index 2c0fdb0cabff..37dc8f342868 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -215,7 +215,7 @@ void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
 	out_be32(&dev->base->ssr, ssr);
 }
 
-void __zmii_fini(struct ocp_device *ocpdev, int input)
+void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
 {
 	struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
 	BUG_ON(!dev || dev->users == 0);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h
index fad6d8bf983a..972e3a44a09f 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -40,7 +40,7 @@ struct ibm_ocp_zmii {
 
 #ifdef CONFIG_IBM_EMAC_ZMII
 int zmii_attach(void *emac) __init;
-void __zmii_fini(struct ocp_device *ocpdev, int input);
+void __zmii_fini(struct ocp_device *ocpdev, int input) __exit;
 static inline void zmii_fini(struct ocp_device *ocpdev, int input)
 {
 	if (ocpdev)
diff --git a/trunk/drivers/net/ixgb/ixgb.h b/trunk/drivers/net/ixgb/ixgb.h
index 3569d5b03388..c8e90861f869 100644
--- a/trunk/drivers/net/ixgb/ixgb.h
+++ b/trunk/drivers/net/ixgb/ixgb.h
@@ -193,6 +193,8 @@ struct ixgb_adapter {
 	u16 msg_enable;
 	struct ixgb_hw_stats stats;
 	uint32_t alloc_rx_buff_failed;
+#ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
+#endif
 };
 #endif /* _IXGB_H_ */
diff --git a/trunk/drivers/net/ixgb/ixgb_main.c b/trunk/drivers/net/ixgb/ixgb_main.c
index 991c8833e23c..6d2b059371f1 100644
--- a/trunk/drivers/net/ixgb/ixgb_main.c
+++ b/trunk/drivers/net/ixgb/ixgb_main.c
@@ -227,7 +227,7 @@ int
 ixgb_up(struct ixgb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int err, irq_flags = IRQF_SHARED;
+	int err;
 	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
 	struct ixgb_hw *hw = &adapter->hw;
 
@@ -246,21 +246,26 @@ ixgb_up(struct ixgb_adapter *adapter)
 	/* disable interrupts and get the hardware into a known state */
 	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
 
-	/* only enable MSI if bus is in PCI-X mode */
-	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
-		err = pci_enable_msi(adapter->pdev);
-		if (!err) {
-			adapter->have_msi = 1;
-			irq_flags = 0;
-		}
+#ifdef CONFIG_PCI_MSI
+	{
+	boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
+			  IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
+	adapter->have_msi = TRUE;
+
+	if (!pcix)
+		adapter->have_msi = FALSE;
+	else if((err = pci_enable_msi(adapter->pdev))) {
+		DPRINTK(PROBE, ERR,
+		 "Unable to allocate MSI interrupt Error: %d\n", err);
+		adapter->have_msi = FALSE;
 		/* proceed to try to request regular interrupt */
 	}
+	}
 
-	err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
-	                  netdev->name, netdev);
-	if (err) {
-		if (adapter->have_msi)
-			pci_disable_msi(adapter->pdev);
+#endif
+	if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
+			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+			  netdev->name, netdev))) {
 		DPRINTK(PROBE, ERR,
 		 "Unable to allocate interrupt Error: %d\n", err);
 		return err;
@@ -302,10 +307,11 @@ ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
 
 	ixgb_irq_disable(adapter);
 	free_irq(adapter->pdev->irq, netdev);
-
-	if (adapter->have_msi)
+#ifdef CONFIG_PCI_MSI
+	if(adapter->have_msi == TRUE)
 		pci_disable_msi(adapter->pdev);
 
+#endif
 	if(kill_watchdog)
 		del_timer_sync(&adapter->watchdog_timer);
 #ifdef CONFIG_IXGB_NAPI
diff --git a/trunk/drivers/net/netxen/netxen_nic_init.c b/trunk/drivers/net/netxen/netxen_nic_init.c
index a36892457761..cf0e96adfe44 100644
--- a/trunk/drivers/net/netxen/netxen_nic_init.c
+++ b/trunk/drivers/net/netxen/netxen_nic_init.c
@@ -1216,7 +1216,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 
 		/* Window = 1 */
 		writel(consumer,
 		       NETXEN_CRB_NORMALIZE(adapter,
-					    recv_crb_registers[adapter->portnum].
+					    recv_crb_registers[ctxid].
 					    crb_rcv_status_consumer));
 	}
diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c
index 832fd69a0e59..104e20456e6f 100644
--- a/trunk/drivers/net/sky2.c
+++ b/trunk/drivers/net/sky2.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include 
@@ -150,6 +151,8 @@ static const char *yukon2_name[] = {
 	"FE",		/* 0xb7 */
 };
 
+static int dmi_blacklisted;
+
 /* Access to external PHY */
 static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
 {
@@ -304,13 +307,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 			   PHY_M_EC_MAC_S_MSK);
 		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
 
-		/* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
 		if (hw->chip_id == CHIP_ID_YUKON_EC)
-			/* set downshift counter to 3x and enable downshift */
 			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
 		else
-			/* set master & slave downshift counter to 1x */
-			ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+			ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
 
 		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
 	}
@@ -327,12 +327,10 @@
 		/* enable automatic crossover */
 		ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
 
-		/* downshift on PHY 88E1112 and 88E1149 is changed */
 		if (sky2->autoneg == AUTONEG_ENABLE
 		    && (hw->chip_id == CHIP_ID_YUKON_XL
 			|| hw->chip_id == CHIP_ID_YUKON_EC_U
 			|| hw->chip_id == CHIP_ID_YUKON_EX)) {
-			/* set downshift counter to 3x and enable downshift */
 			ctrl &= ~PHY_M_PC_DSC_MSK;
 			ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
 		}
@@ -844,12 +842,10 @@ static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
 
 /* Update chip's next pointer */
 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
 {
-	/* Make sure write' to descriptors are complete before we tell hardware */
+	q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
 	wmb();
-	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-
-	/* Synchronize I/O on since next processor may write to tail */
-	mmiowb();
+	sky2_write16(hw, q, idx);
+	sky2_read16(hw, q);
 }
 
@@ -981,7 +977,6 @@ static void sky2_rx_stop(struct sky2_port *sky2)
 
 	/* reset the Rx prefetch unit */
 	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
-	mmiowb();
 }
 
 /* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1201,7 +1196,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
 	}
 
 	/* Tell chip about available buffers */
-	sky2_put_idx(hw, rxq, sky2->rx_put);
+	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
 	return 0;
 nomem:
 	sky2_rx_clean(sky2);
@@ -1543,8 +1538,6 @@
 	}
 
 	sky2->tx_cons = idx;
-	smp_mb();
-
 	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
 		netif_wake_queue(dev);
 }
@@ -1584,6 +1577,13 @@ static int sky2_down(struct net_device *dev)
 	imask &= ~portirq_msk[port];
 	sky2_write32(hw, B0_IMSK, imask);
 
+	/*
+	 * Both ports share the NAPI poll on port 0, so if necessary undo the
+	 * the disable that is done in dev_close.
+	 */
+	if (sky2->port == 0 && hw->ports > 1)
+		netif_poll_enable(dev);
+
 	sky2_gmac_reset(hw, port);
 
 	/* Stop transmitter */
@@ -2139,10 +2139,8 @@
 		switch (le->opcode & ~HW_OWNER) {
 		case OP_RXSTAT:
 			skb = sky2_receive(dev, length, status);
-			if (unlikely(!skb)) {
-				sky2->net_stats.rx_dropped++;
+			if (!skb)
 				goto force_update;
-			}
 
 			skb->protocol = eth_type_trans(skb, dev);
 			sky2->net_stats.rx_packets++;
@@ -2223,7 +2221,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 
 	/* Fully processed status ring so clear irq */
 	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-	mmiowb();
 
 exit_loop:
 	if (buf_write[0]) {
@@ -2344,12 +2341,6 @@
 		printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
 		       dev->name, status);
 
-	if (status & GM_IS_RX_CO_OV)
-		gma_read16(hw, port, GM_RX_IRQ_SRC);
-
-	if (status & GM_IS_TX_CO_OV)
-		gma_read16(hw, port, GM_TX_IRQ_SRC);
-
 	if (status & GM_IS_RX_FF_OR) {
 		++sky2->net_stats.rx_fifo_errors;
 		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
@@ -2448,7 +2439,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	if (work_done < work_limit) {
 		netif_rx_complete(dev0);
 
-		/* end of interrupt, re-enables also acts as I/O synchronization */
 		sky2_read32(hw, B0_Y2_SP_LISR);
 		return 0;
 	} else {
@@ -2544,6 +2534,17 @@ static int __devinit sky2_init(struct sky2_hw *hw)
 		return -EOPNOTSUPP;
 	}
 
+
+	/* Some Gigabyte motherboards have 88e8056 but cause problems
+	 * There is some unresolved hardware related problem that causes
+	 * descriptor errors and receive data corruption.
+	 */
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U && dmi_blacklisted) {
+		dev_err(&hw->pdev->dev,
+			"88E8056 on this motherboard not supported\n");
+		return -EOPNOTSUPP;
+	}
+
 	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
 	hw->ports = 1;
 	t8 = sky2_read8(hw, B2_Y2_HW_RES);
@@ -3909,8 +3910,24 @@ static struct pci_driver sky2_driver = {
 	.shutdown = sky2_shutdown,
 };
 
+static struct dmi_system_id __initdata broken_dmi_table[] = {
+	{
+		.ident = "Gigabyte 965P-S3",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Gigabyte Technology Co., Ltd."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "965P-S3"),
+
+		},
+	},
+	{ }
+};
+
 static int __init sky2_init_module(void)
 {
+	/* Look for sick motherboards */
+	if (dmi_check_system(broken_dmi_table))
+		dmi_blacklisted = 1;
+
 	return pci_register_driver(&sky2_driver);
 }
 
diff --git a/trunk/drivers/net/spider_net.c b/trunk/drivers/net/spider_net.c
index c3964c3d89d9..108adbf5b5eb 100644
--- a/trunk/drivers/net/spider_net.c
+++ b/trunk/drivers/net/spider_net.c
@@ -430,8 +430,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 	/* and we need to have it 128 byte aligned, therefore we allocate a
 	 * bit more */
 	/* allocate an skb */
-	descr->skb = netdev_alloc_skb(card->netdev,
-				      bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
+	descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
 	if (!descr->skb) {
 		if (netif_msg_rx_err(card) && net_ratelimit())
 			pr_err("Not enough memory to allocate rx buffer\n");