From ed393732e8997c89981e0eedef91156e7b7dbdb3 Mon Sep 17 00:00:00 2001
From: Jason Gaston
Date: Mon, 9 Jan 2006 11:09:13 -0800
Subject: [PATCH]

--- yaml ---
r: 18842
b: refs/heads/master
c: f285757cab254470a10f711cd3cbaf9d92f7aa06
h: refs/heads/master
v: v3
---
 [refs] | 2 +-
 trunk/MAINTAINERS | 8 +-
 trunk/arch/sparc/mm/iommu.c | 13 +-
 trunk/arch/sparc64/kernel/time.c | 22 +-
 trunk/drivers/net/b44.c | 2 +-
 trunk/drivers/net/cassini.c | 4 +-
 trunk/drivers/net/e100.c | 142 +--
 trunk/drivers/net/e1000/e1000.h | 51 +-
 trunk/drivers/net/e1000/e1000_ethtool.c | 167 ++--
 trunk/drivers/net/e1000/e1000_hw.c | 53 +-
 trunk/drivers/net/e1000/e1000_hw.h | 25 +-
 trunk/drivers/net/e1000/e1000_main.c | 814 ++++++------
 trunk/drivers/net/e1000/e1000_param.c | 14 +-
 trunk/drivers/net/mv643xx_eth.c | 680 ++++++---------
 trunk/drivers/net/skge.c | 20 +-
 trunk/drivers/net/sky2.c | 219 ++---
 trunk/drivers/net/spider_net.c | 512 ++++++-----
 trunk/drivers/net/spider_net.h | 75 +-
 trunk/drivers/net/spider_net_ethtool.c | 19 -
 trunk/drivers/net/tg3.c | 82 +-
 trunk/drivers/net/tg3.h | 1 -
 trunk/drivers/net/wireless/airo.c | 21 +-
 trunk/drivers/net/wireless/atmel.c | 4 +-
 trunk/drivers/net/wireless/hostap/Kconfig | 22 +-
 trunk/drivers/net/wireless/hostap/Makefile | 3 +-
 trunk/drivers/net/wireless/hostap/hostap.h | 37 -
 .../net/wireless/hostap/hostap_80211.h | 3 -
 .../net/wireless/hostap/hostap_80211_rx.c | 11 -
 .../net/wireless/hostap/hostap_80211_tx.c | 15 -
 trunk/drivers/net/wireless/hostap/hostap_ap.c | 36 +-
 trunk/drivers/net/wireless/hostap/hostap_ap.h | 2 -
 .../net/wireless/hostap/hostap_common.h | 3 -
 .../net/wireless/hostap/hostap_config.h | 13 +-
 .../drivers/net/wireless/hostap/hostap_info.c | 3 -
 .../net/wireless/hostap/hostap_ioctl.c | 12 +-
 .../drivers/net/wireless/hostap/hostap_main.c | 60 +-
 .../drivers/net/wireless/hostap/hostap_proc.c | 7 -
 .../drivers/net/wireless/hostap/hostap_wlan.h | 4 -
 trunk/drivers/net/wireless/ipw2100.c | 434 ++++++++++
 trunk/drivers/net/wireless/ipw2200.c | 14 +-
 .../drivers/net/wireless/prism54/isl_ioctl.c | 2 +-
 .../drivers/net/wireless/prism54/islpci_eth.c | 2 +-
 trunk/drivers/net/wireless/ray_cs.c | 2 +-
 trunk/drivers/net/wireless/wavelan_cs.c | 2 +-
 trunk/drivers/scsi/ahci.c | 10 +
 trunk/drivers/video/sbuslib.c | 9 +-
 trunk/drivers/video/sbuslib.h | 2 +-
 trunk/include/asm-powerpc/lppaca.h | 4 +-
 trunk/include/linux/kernel.h | 1 -
 trunk/include/linux/netfilter_ipv6/ip6t_ah.h | 9 +
 trunk/include/linux/netfilter_ipv6/ip6t_esp.h | 9 +
 .../include/linux/netfilter_ipv6/ip6t_frag.h | 9 +
 .../include/linux/netfilter_ipv6/ip6t_opts.h | 9 +
 trunk/include/linux/netfilter_ipv6/ip6t_rt.h | 9 +
 trunk/include/linux/skbuff.h | 2 +-
 trunk/include/net/ieee80211_crypt.h | 1 -
 trunk/include/net/iw_handler.h | 2 +-
 trunk/net/bridge/netfilter/ebt_ip.c | 4 +-
 trunk/net/bridge/netfilter/ebt_log.c | 4 +-
 trunk/net/core/filter.c | 13 +-
 trunk/net/core/netpoll.c | 2 +-
 trunk/net/core/pktgen.c | 34 +-
 trunk/net/dccp/ackvec.c | 2 +-
 trunk/net/ipv4/netfilter/Makefile | 1 +
 .../ipv4/netfilter/ip_conntrack_proto_gre.c | 1 -
 trunk/net/ipv4/netfilter/ipt_policy.c | 7 +-
 trunk/net/ipv4/route.c | 14 +-
 trunk/net/ipv6/addrconf.c | 2 +-
 trunk/net/ipv6/anycast.c | 2 +-
 trunk/net/ipv6/ip6_flowlabel.c | 4 +-
 trunk/net/ipv6/mcast.c | 6 +-
 trunk/net/ipv6/netfilter/Makefile | 1 +
 trunk/net/ipv6/netfilter/ip6t_dst.c | 151 ++--
 trunk/net/ipv6/netfilter/ip6t_eui64.c | 68 +-
 trunk/net/ipv6/netfilter/ip6t_frag.c | 157 ++--
 trunk/net/ipv6/netfilter/ip6t_hbh.c | 151 ++--
 trunk/net/ipv6/netfilter/ip6t_ipv6header.c | 
79 +- trunk/net/ipv6/netfilter/ip6t_owner.c | 28 +- trunk/net/ipv6/netfilter/ip6t_policy.c | 2 +- trunk/net/ipv6/netfilter/ip6t_rt.c | 215 +++-- trunk/net/rxrpc/krxtimod.c | 2 +- trunk/net/rxrpc/proc.c | 12 +- trunk/net/sched/sch_prio.c | 7 +- trunk/net/sched/sch_sfq.c | 4 - trunk/sound/sparc/cs4231.c | 3 +- 85 files changed, 2184 insertions(+), 2505 deletions(-) diff --git a/[refs] b/[refs] index 7c32a8cc256d..d82e0b423fa0 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: d1138cf035ad5a8dc0796b213bd078a2fb92eb7c +refs/heads/master: f285757cab254470a10f711cd3cbaf9d92f7aa06 diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index ff16eac8cf5b..6d1b048c62a1 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -1696,13 +1696,11 @@ M: mtk-manpages@gmx.net W: ftp://ftp.kernel.org/pub/linux/docs/manpages S: Maintained -MARVELL MV643XX ETHERNET DRIVER -P: Dale Farnsworth -M: dale@farnsworth.org +MARVELL MV64340 ETHERNET DRIVER P: Manish Lachwani -M: mlachwani@mvista.com +L: linux-mips@linux-mips.org L: netdev@vger.kernel.org -S: Odd Fixes for 2.4; Maintained for 2.6. +S: Supported MATROX FRAMEBUFFER DRIVER P: Petr Vandrovec diff --git a/trunk/arch/sparc/mm/iommu.c b/trunk/arch/sparc/mm/iommu.c index 77840c804786..489bf68d5f05 100644 --- a/trunk/arch/sparc/mm/iommu.c +++ b/trunk/arch/sparc/mm/iommu.c @@ -295,7 +295,8 @@ static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus) int ioptex; int i; - BUG_ON(busa < iommu->start); + if (busa < iommu->start) + BUG(); ioptex = (busa - iommu->start) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { iopte_val(iommu->page_table[ioptex + i]) = 0; @@ -339,9 +340,9 @@ static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va, iopte_t *first; int ioptex; - BUG_ON((va & ~PAGE_MASK) != 0); - BUG_ON((addr & ~PAGE_MASK) != 0); - BUG_ON((len & ~PAGE_MASK) != 0); + if ((va & ~PAGE_MASK) != 0) BUG(); + if ((addr & ~PAGE_MASK) != 0) BUG(); + if ((len & ~PAGE_MASK) != 0) BUG(); /* page color = physical address */ ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, @@ -404,8 +405,8 @@ static void iommu_unmap_dma_area(unsigned long busa, int len) unsigned long end; int ioptex = (busa - iommu->start) >> PAGE_SHIFT; - BUG_ON((busa & ~PAGE_MASK) != 0); - BUG_ON((len & ~PAGE_MASK) != 0); + if ((busa & ~PAGE_MASK) != 0) BUG(); + if ((len & ~PAGE_MASK) != 0) BUG(); iopte += ioptex; end = busa + len; diff --git a/trunk/arch/sparc64/kernel/time.c b/trunk/arch/sparc64/kernel/time.c index a22930d62adf..459c8fbe02b4 100644 --- a/trunk/arch/sparc64/kernel/time.c +++ b/trunk/arch/sparc64/kernel/time.c @@ -280,9 +280,9 @@ static struct sparc64_tick_ops stick_operations __read_mostly = { * Since STICK is constantly updating, we have to access it carefully. * * The sequence we use to read is: - * 1) read high - * 2) read low - * 3) read high again, if it rolled re-read both low and high again. 
+ * 1) read low + * 2) read high + * 3) read low again, if it rolled over increment high by 1 * * Writing STICK safely is also tricky: * 1) write low to zero @@ -295,18 +295,18 @@ static struct sparc64_tick_ops stick_operations __read_mostly = { static unsigned long __hbird_read_stick(void) { unsigned long ret, tmp1, tmp2, tmp3; - unsigned long addr = HBIRD_STICK_ADDR+8; + unsigned long addr = HBIRD_STICK_ADDR; - __asm__ __volatile__("ldxa [%1] %5, %2\n" - "1:\n\t" - "sub %1, 0x8, %1\n\t" - "ldxa [%1] %5, %3\n\t" + __asm__ __volatile__("ldxa [%1] %5, %2\n\t" "add %1, 0x8, %1\n\t" + "ldxa [%1] %5, %3\n\t" + "sub %1, 0x8, %1\n\t" "ldxa [%1] %5, %4\n\t" "cmp %4, %2\n\t" - "bne,a,pn %%xcc, 1b\n\t" - " mov %4, %2\n\t" - "sllx %4, 32, %4\n\t" + "blu,a,pn %%xcc, 1f\n\t" + " add %3, 1, %3\n" + "1:\n\t" + "sllx %3, 32, %3\n\t" "or %3, %4, %0\n\t" : "=&r" (ret), "=&r" (addr), "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3) diff --git a/trunk/drivers/net/b44.c b/trunk/drivers/net/b44.c index df9d6e80c4f2..7aa49b974dc5 100644 --- a/trunk/drivers/net/b44.c +++ b/trunk/drivers/net/b44.c @@ -2136,7 +2136,7 @@ static int __init b44_init(void) /* Setup paramaters for syncing RX/TX DMA descriptors */ dma_desc_align_mask = ~(dma_desc_align_size - 1); - dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); + dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc)); return pci_module_init(&b44_driver); } diff --git a/trunk/drivers/net/cassini.c b/trunk/drivers/net/cassini.c index dde631f8f685..1f7ca453bb4a 100644 --- a/trunk/drivers/net/cassini.c +++ b/trunk/drivers/net/cassini.c @@ -1925,8 +1925,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp, u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); #endif if (netif_msg_intr(cp)) - printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n", - cp->dev->name, status, (unsigned long long)compwb); + printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n", + cp->dev->name, status, compwb); /* process all the rings */ for (ring = 0; ring < N_TX_RINGS; ring++) { #ifdef USE_TX_COMPWB diff --git a/trunk/drivers/net/e100.c b/trunk/drivers/net/e100.c index bf1fd2b98bf8..4726722a0635 100644 --- a/trunk/drivers/net/e100.c +++ b/trunk/drivers/net/e100.c @@ -1,25 +1,25 @@ /******************************************************************************* - + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. - - This program is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the Free - Software Foundation; either version 2 of the License, or (at your option) + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- + You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., 59 + this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - + The full GNU General Public License is included in this distribution in the file called LICENSE. - + Contact Information: Linux NICS Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 @@ -160,7 +160,7 @@ #define DRV_NAME "e100" #define DRV_EXT "-NAPI" -#define DRV_VERSION "3.5.10-k2"DRV_EXT +#define DRV_VERSION "3.4.14-k4"DRV_EXT #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" #define PFX DRV_NAME ": " @@ -320,7 +320,7 @@ enum cuc_dump { cuc_dump_complete = 0x0000A005, cuc_dump_reset_complete = 0x0000A007, }; - + enum port { software_reset = 0x0000, selftest = 0x0001, @@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; writeb(ctrl, &nic->csr->eeprom_ctrl_lo); e100_write_flush(nic); udelay(4); - + writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); e100_write_flush(nic); udelay(4); - + /* Eeprom drives a dummy zero to EEDO after receiving * complete address. Use this to adjust addr_len. */ ctrl = readb(&nic->csr->eeprom_ctrl_lo); @@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) *addr_len -= (i - 16); i = 17; } - + data = (data << 1) | (ctrl & eedo ? 1 : 0); } @@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ } -static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) +static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) { /* *INDENT-OFF* */ static struct { @@ -1213,13 +1213,13 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb * driver can change the algorithm. * * INTDELAY - This loads the dead-man timer with its inital value. -* When this timer expires the interrupt is asserted, and the +* When this timer expires the interrupt is asserted, and the * timer is reset each time a new packet is received. (see * BUNDLEMAX below to set the limit on number of chained packets) * The current default is 0x600 or 1536. Experiments show that * the value should probably stay within the 0x200 - 0x1000. * -* BUNDLEMAX - +* BUNDLEMAX - * This sets the maximum number of frames that will be bundled. In * some situations, such as the TCP windowing algorithm, it may be * better to limit the growth of the bundle size than let it go as @@ -1229,7 +1229,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb * an interrupt for every frame received. If you do not want to put * a limit on the bundle size, set this value to xFFFF. * -* BUNDLESMALL - +* BUNDLESMALL - * This contains a bit-mask describing the minimum size frame that * will be bundled. The default masks the lower 7 bits, which means * that any frame less than 128 bytes in length will not be bundled, @@ -1244,7 +1244,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb * * The current default is 0xFF80, which masks out the lower 7 bits. * This means that any frame which is x7F (127) bytes or smaller -* will cause an immediate interrupt. Because this value must be a +* will cause an immediate interrupt. 
Because this value must be a * bit mask, there are only a few valid values that can be used. To * turn this feature off, the driver can write the value xFFFF to the * lower word of this instruction (in the same way that the other @@ -1253,7 +1253,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb * standard Ethernet frames are <= 2047 bytes in length. *************************************************************************/ -/* if you wish to disable the ucode functionality, while maintaining the +/* if you wish to disable the ucode functionality, while maintaining the * workarounds it provides, set the following defines to: * BUNDLESMALL 0 * BUNDLEMAX 1 @@ -1284,46 +1284,12 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb for (i = 0; i < UCODE_SIZE; i++) cb->u.ucode[i] = cpu_to_le32(ucode[i]); - cb->command = cpu_to_le16(cb_ucode | cb_el); + cb->command = cpu_to_le16(cb_ucode); return; } noloaducode: - cb->command = cpu_to_le16(cb_nop | cb_el); -} - -static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb, - void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) -{ - int err = 0, counter = 50; - struct cb *cb = nic->cb_to_clean; - - if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode))) - DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err); - - /* must restart cuc */ - nic->cuc_cmd = cuc_start; - - /* wait for completion */ - e100_write_flush(nic); - udelay(10); - - /* wait for possibly (ouch) 500ms */ - while (!(cb->status & cpu_to_le16(cb_complete))) { - msleep(10); - if (!--counter) break; - } - - /* ack any interupts, something could have been set */ - writeb(~0, &nic->csr->scb.stat_ack); - - /* if the command failed, or is not OK, notify and return */ - if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { - DPRINTK(PROBE,ERR, "ucode load failed\n"); - err = -EPERM; - } - - return err; + cb->command = cpu_to_le16(cb_nop); } static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, @@ -1391,13 +1357,13 @@ static int e100_phy_init(struct nic *nic) mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); } - if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) { /* enable/disable MDI/MDI-X auto-switching. MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */ if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) || - (nic->mac == mac_82551_10) || (nic->mii.force_media) || - !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) + (nic->mac == mac_82551_10) || (nic->mii.force_media) || + !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0); else mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH); @@ -1422,7 +1388,7 @@ static int e100_hw_init(struct nic *nic) return err; if((err = e100_exec_cmd(nic, ruc_load_base, 0))) return err; - if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode))) + if((err = e100_exec_cb(nic, NULL, e100_load_ucode))) return err; if((err = e100_exec_cb(nic, NULL, e100_configure))) return err; @@ -1527,7 +1493,7 @@ static void e100_update_stats(struct nic *nic) } } - + if(e100_exec_cmd(nic, cuc_dump_reset, 0)) DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); } @@ -1576,10 +1542,10 @@ static void e100_watchdog(unsigned long data) mii_check_link(&nic->mii); /* Software generated interrupt to recover from (rare) Rx - * allocation failure. 
- * Unfortunately have to use a spinlock to not re-enable interrupts - * accidentally, due to hardware that shares a register between the - * interrupt mask bit and the SW Interrupt generation bit */ + * allocation failure. + * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ spin_lock_irq(&nic->cmd_lock); writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); spin_unlock_irq(&nic->cmd_lock); @@ -1864,7 +1830,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done, struct rx *rx_to_start = NULL; /* are we already rnr? then pay attention!!! this ensures that - * the state machine progression never allows a start with a + * the state machine progression never allows a start with a * partially cleaned list, avoiding a race between hardware * and rx_to_clean when in NAPI mode */ if(RU_SUSPENDED == nic->ru_running) @@ -2100,7 +2066,7 @@ static void e100_tx_timeout(struct net_device *netdev) { struct nic *nic = netdev_priv(netdev); - /* Reset outside of interrupt context, to avoid request_irq + /* Reset outside of interrupt context, to avoid request_irq * in interrupt context */ schedule_work(&nic->tx_timeout_task); } @@ -2347,7 +2313,7 @@ static int e100_set_ringparam(struct net_device *netdev, struct param_range *rfds = &nic->params.rfds; struct param_range *cbs = &nic->params.cbs; - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; if(netif_running(netdev)) @@ -2665,9 +2631,7 @@ static int __devinit e100_probe(struct pci_dev *pdev, nic->flags |= wol_magic; /* ack any pending wake events, disable PME */ - err = pci_enable_wake(pdev, 0, 0); - if (err) - DPRINTK(PROBE, ERR, "Error clearing wake event\n"); + pci_enable_wake(pdev, 0, 0); strcpy(netdev->name, "eth%d"); if((err = register_netdev(netdev))) { @@ -2718,7 +2682,6 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); - int retval; if(netif_running(netdev)) e100_down(nic); @@ -2726,14 +2689,9 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(netdev); pci_save_state(pdev); - retval = pci_enable_wake(pdev, pci_choose_state(pdev, state), - nic->flags & (wol_magic | e100_asf(nic))); - if (retval) - DPRINTK(PROBE,ERR, "Error enabling wake\n"); + pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic))); pci_disable_device(pdev); - retval = pci_set_power_state(pdev, pci_choose_state(pdev, state)); - if (retval) - DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } @@ -2742,16 +2700,11 @@ static int e100_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); - int retval; - retval = pci_set_power_state(pdev, PCI_D0); - if (retval) - DPRINTK(PROBE,ERR, "Error waking adapter\n"); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* ack any pending wake events, disable PME */ - retval = pci_enable_wake(pdev, 0, 0); - if (retval) - DPRINTK(PROBE,ERR, "Error clearing wake events\n"); + pci_enable_wake(pdev, 0, 0); if(e100_hw_init(nic)) DPRINTK(HW, ERR, "e100_hw_init failed\n"); @@ -2768,15 +2721,12 @@ static void e100_shutdown(struct pci_dev *pdev) { 
struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); - int retval; #ifdef CONFIG_PM - retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); + pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); #else - retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic)); + pci_enable_wake(pdev, 0, nic->flags & (wol_magic)); #endif - if (retval) - DPRINTK(PROBE,ERR, "Error enabling wake\n"); } @@ -2789,7 +2739,7 @@ static struct pci_driver e100_driver = { .suspend = e100_suspend, .resume = e100_resume, #endif - .shutdown = e100_shutdown, + .shutdown = e100_shutdown, }; static int __init e100_init_module(void) diff --git a/trunk/drivers/net/e1000/e1000.h b/trunk/drivers/net/e1000/e1000.h index 27c77306193b..e02e9ba2e18b 100644 --- a/trunk/drivers/net/e1000/e1000.h +++ b/trunk/drivers/net/e1000/e1000.h @@ -72,6 +72,10 @@ #include #include #include +#ifdef CONFIG_E1000_MQ +#include +#include +#endif #define BAR_0 0 #define BAR_1 1 @@ -83,10 +87,6 @@ struct e1000_adapter; #include "e1000_hw.h" -#ifdef CONFIG_E1000_MQ -#include -#include -#endif #ifdef DBG #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) @@ -169,13 +169,6 @@ struct e1000_buffer { uint16_t next_to_watch; }; -#ifdef CONFIG_E1000_MQ -struct e1000_queue_stats { - uint64_t packets; - uint64_t bytes; -}; -#endif - struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; @@ -198,12 +191,10 @@ struct e1000_tx_ring { spinlock_t tx_lock; uint16_t tdh; uint16_t tdt; + uint64_t pkt; boolean_t last_tx_tso; -#ifdef CONFIG_E1000_MQ - struct e1000_queue_stats tx_stats; -#endif }; struct e1000_rx_ring { @@ -225,17 +216,9 @@ struct e1000_rx_ring { struct e1000_ps_page *ps_page; struct e1000_ps_page_dma *ps_page_dma; - struct sk_buff *rx_skb_top; - struct sk_buff *rx_skb_prev; - - /* cpu for rx queue */ - int cpu; - uint16_t rdh; uint16_t rdt; -#ifdef CONFIG_E1000_MQ - struct e1000_queue_stats rx_stats; -#endif + uint64_t pkt; }; #define E1000_DESC_UNUSED(R) \ @@ -268,9 +251,6 @@ struct e1000_adapter { uint16_t link_speed; uint16_t link_duplex; spinlock_t stats_lock; -#ifdef CONFIG_E1000_NAPI - spinlock_t tx_queue_lock; -#endif atomic_t irq_sem; struct work_struct tx_timeout_task; struct work_struct watchdog_task; @@ -284,7 +264,6 @@ struct e1000_adapter { #ifdef CONFIG_E1000_MQ struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */ #endif - unsigned long tx_queue_len; uint32_t txd_cmd; uint32_t tx_int_delay; uint32_t tx_abs_int_delay; @@ -292,11 +271,9 @@ struct e1000_adapter { uint64_t gotcl_old; uint64_t tpt_old; uint64_t colc_old; - uint32_t tx_timeout_count; uint32_t tx_fifo_head; uint32_t tx_head_addr; uint32_t tx_fifo_size; - uint8_t tx_timeout_factor; atomic_t tx_fifo_stall; boolean_t pcix_82544; boolean_t detect_tx_hung; @@ -304,15 +281,14 @@ struct e1000_adapter { /* RX */ #ifdef CONFIG_E1000_NAPI boolean_t (*clean_rx) (struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int *work_done, int work_to_do); + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); #else boolean_t (*clean_rx) (struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring); #endif void (*alloc_rx_buf) (struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count); + struct e1000_rx_ring *rx_ring); struct e1000_rx_ring *rx_ring; /* One per active queue */ #ifdef CONFIG_E1000_NAPI struct net_device *polling_netdev; /* One per active 
queue */ @@ -320,15 +296,13 @@ struct e1000_adapter { #ifdef CONFIG_E1000_MQ struct net_device **cpu_netdev; /* per-cpu */ struct call_async_data_struct rx_sched_call_data; - cpumask_t cpumask; + int cpu_for_queue[4]; #endif - int num_tx_queues; - int num_rx_queues; + int num_queues; uint64_t hw_csum_err; uint64_t hw_csum_good; uint64_t rx_hdr_split; - uint32_t alloc_rx_buff_failed; uint32_t rx_int_delay; uint32_t rx_abs_int_delay; boolean_t rx_csum; @@ -356,7 +330,6 @@ struct e1000_adapter { struct e1000_rx_ring test_rx_ring; - u32 *config_space; int msg_enable; #ifdef CONFIG_PCI_MSI boolean_t have_msi; diff --git a/trunk/drivers/net/e1000/e1000_ethtool.c b/trunk/drivers/net/e1000/e1000_ethtool.c index d252297e4db0..c88f1a3c1b1d 100644 --- a/trunk/drivers/net/e1000/e1000_ethtool.c +++ b/trunk/drivers/net/e1000/e1000_ethtool.c @@ -80,7 +80,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = { { "tx_deferred_ok", E1000_STAT(stats.dc) }, { "tx_single_coll_ok", E1000_STAT(stats.scc) }, { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, - { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, { "rx_long_length_errors", E1000_STAT(stats.roc) }, { "rx_short_length_errors", E1000_STAT(stats.ruc) }, { "rx_align_errors", E1000_STAT(stats.algnerrc) }, @@ -94,20 +93,9 @@ static const struct e1000_stats e1000_gstrings_stats[] = { { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, { "rx_header_split", E1000_STAT(rx_hdr_split) }, - { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, }; - -#ifdef CONFIG_E1000_MQ -#define E1000_QUEUE_STATS_LEN \ - (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \ - ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \ - * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t)) -#else -#define E1000_QUEUE_STATS_LEN 0 -#endif -#define E1000_GLOBAL_STATS_LEN \ +#define E1000_STATS_LEN \ sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) -#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", @@ -195,15 +183,7 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* When SoL/IDER sessions are active, autoneg/speed/duplex - * cannot be changed */ - if (e1000_check_phy_reset_block(hw)) { - DPRINTK(DRV, ERR, "Cannot change link characteristics " - "when SoL/IDER is active.\n"); - return -EINVAL; - } - - if (ecmd->autoneg == AUTONEG_ENABLE) { + if(ecmd->autoneg == AUTONEG_ENABLE) { hw->autoneg = 1; if(hw->media_type == e1000_media_type_fiber) hw->autoneg_advertised = ADVERTISED_1000baseT_Full | @@ -587,21 +567,21 @@ e1000_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->driver, e1000_driver_name, 32); strncpy(drvinfo->version, e1000_driver_version, 32); - - /* EEPROM image version # is reported as firmware version # for + + /* EEPROM image version # is reported as firware version # for * 8257{1|2|3} controllers */ e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data); switch (adapter->hw.mac_type) { case e1000_82571: case e1000_82572: case e1000_82573: - sprintf(firmware_version, "%d.%d-%d", + sprintf(firmware_version, "%d.%d-%d", (eeprom_data & 0xF000) >> 12, (eeprom_data & 0x0FF0) >> 4, eeprom_data & 0x000F); break; default: - sprintf(firmware_version, "N/A"); + sprintf(firmware_version, 
"n/a"); } strncpy(drvinfo->fw_version, firmware_version, 32); @@ -643,8 +623,8 @@ e1000_set_ringparam(struct net_device *netdev, struct e1000_rx_ring *rxdr, *rx_old, *rx_new; int i, err, tx_ring_size, rx_ring_size; - tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; - rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; + tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues; + rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues; if (netif_running(adapter->netdev)) e1000_down(adapter); @@ -683,10 +663,10 @@ e1000_set_ringparam(struct net_device *netdev, E1000_MAX_TXD : E1000_MAX_82544_TXD)); E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); - for (i = 0; i < adapter->num_tx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) { txdr[i].count = txdr->count; - for (i = 0; i < adapter->num_rx_queues; i++) rxdr[i].count = rxdr->count; + } if(netif_running(adapter->netdev)) { /* Try to get new resources before deleting old */ @@ -999,17 +979,18 @@ e1000_free_desc_rings(struct e1000_adapter *adapter) } } - if (txdr->desc) { + if(txdr->desc) { pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma); txdr->desc = NULL; } - if (rxdr->desc) { + if(rxdr->desc) { pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); rxdr->desc = NULL; } kfree(txdr->buffer_info); txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); rxdr->buffer_info = NULL; @@ -1346,11 +1327,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) static int e1000_setup_loopback_test(struct e1000_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; uint32_t rctl; + struct e1000_hw *hw = &adapter->hw; if (hw->media_type == e1000_media_type_fiber || - hw->media_type == e1000_media_type_internal_serdes) { + hw->media_type == e1000_media_type_internal_serdes) { switch (hw->mac_type) { case e1000_82545: case e1000_82546: @@ -1381,25 +1362,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter) static void e1000_loopback_cleanup(struct e1000_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; uint32_t rctl; uint16_t phy_reg; + struct e1000_hw *hw = &adapter->hw; - rctl = E1000_READ_REG(hw, RCTL); + rctl = E1000_READ_REG(&adapter->hw, RCTL); rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); - E1000_WRITE_REG(hw, RCTL, rctl); + E1000_WRITE_REG(&adapter->hw, RCTL, rctl); switch (hw->mac_type) { case e1000_82571: case e1000_82572: if (hw->media_type == e1000_media_type_fiber || - hw->media_type == e1000_media_type_internal_serdes) { + hw->media_type == e1000_media_type_internal_serdes){ #define E1000_SERDES_LB_OFF 0x400 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF); msec_delay(10); break; } - /* Fall Through */ + /* fall thru for Cu adapters */ case e1000_82545: case e1000_82546: case e1000_82545_rev_3: @@ -1420,7 +1401,7 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; + frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); @@ -1429,7 +1410,7 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) static int e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) { - frame_size &= ~1; + frame_size = (frame_size % 2) ? 
(frame_size - 1) : frame_size; if(*(skb->data + 3) == 0xFF) { if((*(skb->data + frame_size / 2 + 10) == 0xBE) && (*(skb->data + frame_size / 2 + 12) == 0xAF)) { @@ -1507,25 +1488,14 @@ e1000_run_loopback_test(struct e1000_adapter *adapter) static int e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data) { - /* PHY loopback cannot be performed if SoL/IDER - * sessions are active */ - if (e1000_check_phy_reset_block(&adapter->hw)) { - DPRINTK(DRV, ERR, "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); - *data = 0; - goto out; - } - - if ((*data = e1000_setup_desc_rings(adapter))) - goto out; - if ((*data = e1000_setup_loopback_test(adapter))) - goto err_loopback; + if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback; + if((*data = e1000_setup_loopback_test(adapter))) + goto err_loopback_setup; *data = e1000_run_loopback_test(adapter); e1000_loopback_cleanup(adapter); - -err_loopback: +err_loopback_setup: e1000_free_desc_rings(adapter); -out: +err_loopback: return *data; } @@ -1647,7 +1617,6 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546GB_FIBER: - case E1000_DEV_ID_82571EB_FIBER: /* Wake events only supported on port A for dual fiber */ if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { wol->supported = 0; @@ -1691,7 +1660,6 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546GB_FIBER: - case E1000_DEV_ID_82571EB_FIBER: /* Wake events only supported on port A for dual fiber */ if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) return wol->wolopts ? -EOPNOTSUPP : 0; @@ -1753,21 +1721,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data) mod_timer(&adapter->blink_timer, jiffies); msleep_interruptible(data * 1000); del_timer_sync(&adapter->blink_timer); - } else if (adapter->hw.mac_type < e1000_82573) { - E1000_WRITE_REG(&adapter->hw, LEDCTL, - (E1000_LEDCTL_LED2_BLINK_RATE | - E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK | - (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | - (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) | - (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT))); + } + else if(adapter->hw.mac_type < e1000_82573) { + E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | + E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | + (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) | + (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT))); msleep_interruptible(data * 1000); - } else { - E1000_WRITE_REG(&adapter->hw, LEDCTL, - (E1000_LEDCTL_LED2_BLINK_RATE | - E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | - (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | - (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) | - (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT))); + } + else { + E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | + E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | + (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) | + (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT))); msleep_interruptible(data * 1000); } @@ -1800,43 +1768,19 @@ e1000_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, uint64_t *data) { struct e1000_adapter *adapter = 
netdev_priv(netdev); -#ifdef CONFIG_E1000_MQ - uint64_t *queue_stat; - int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t); - int j, k; -#endif int i; e1000_update_stats(adapter); - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; - data[i] = (e1000_gstrings_stats[i].sizeof_stat == + for(i = 0; i < E1000_STATS_LEN; i++) { + char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; + data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; } -#ifdef CONFIG_E1000_MQ - for (j = 0; j < adapter->num_tx_queues; j++) { - queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats; - for (k = 0; k < stat_count; k++) - data[i + k] = queue_stat[k]; - i += k; - } - for (j = 0; j < adapter->num_rx_queues; j++) { - queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats; - for (k = 0; k < stat_count; k++) - data[i + k] = queue_stat[k]; - i += k; - } -#endif -/* BUG_ON(i != E1000_STATS_LEN); */ } static void e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) { -#ifdef CONFIG_E1000_MQ - struct e1000_adapter *adapter = netdev_priv(netdev); -#endif - uint8_t *p = data; int i; switch(stringset) { @@ -1845,26 +1789,11 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) E1000_TEST_LEN*ETH_GSTRING_LEN); break; case ETH_SS_STATS: - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - memcpy(p, e1000_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } -#ifdef CONFIG_E1000_MQ - for (i = 0; i < adapter->num_tx_queues; i++) { - sprintf(p, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + for (i=0; i < E1000_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); } - for (i = 0; i < adapter->num_rx_queues; i++) { - sprintf(p, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - } -#endif -/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ break; } } diff --git a/trunk/drivers/net/e1000/e1000_hw.c b/trunk/drivers/net/e1000/e1000_hw.c index 2437d362ff63..136fc031e4ad 100644 --- a/trunk/drivers/net/e1000/e1000_hw.c +++ b/trunk/drivers/net/e1000/e1000_hw.c @@ -318,8 +318,6 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82546GB_FIBER: case E1000_DEV_ID_82546GB_SERDES: case E1000_DEV_ID_82546GB_PCIE: - case E1000_DEV_ID_82546GB_QUAD_COPPER: - case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: hw->mac_type = e1000_82546_rev_3; break; case E1000_DEV_ID_82541EI: @@ -641,7 +639,6 @@ e1000_init_hw(struct e1000_hw *hw) uint16_t cmd_mmrbc; uint16_t stat_mmrbc; uint32_t mta_size; - uint32_t ctrl_ext; DEBUGFUNC("e1000_init_hw"); @@ -738,6 +735,7 @@ e1000_init_hw(struct e1000_hw *hw) break; case e1000_82571: case e1000_82572: + ctrl |= (1 << 22); case e1000_82573: ctrl |= E1000_TXDCTL_COUNT_DESC; break; @@ -777,15 +775,6 @@ e1000_init_hw(struct e1000_hw *hw) */ e1000_clear_hw_cntrs(hw); - if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || - hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { - ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); - /* Relaxed ordering must be disabled to avoid a parity - * error crash in a PCI slot. 
*/ - ctrl_ext |= E1000_CTRL_EXT_RO_DIS; - E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); - } - return ret_val; } @@ -849,11 +838,6 @@ e1000_setup_link(struct e1000_hw *hw) DEBUGFUNC("e1000_setup_link"); - /* In the case of the phy reset being blocked, we already have a link. - * We do not have to set it up again. */ - if (e1000_check_phy_reset_block(hw)) - return E1000_SUCCESS; - /* Read and store word 0x0F of the EEPROM. This word contains bits * that determine the hardware's default PAUSE (flow control) mode, * a bit that determines whether the HW defaults to enabling or @@ -1945,19 +1929,14 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) void e1000_config_collision_dist(struct e1000_hw *hw) { - uint32_t tctl, coll_dist; + uint32_t tctl; DEBUGFUNC("e1000_config_collision_dist"); - if (hw->mac_type < e1000_82543) - coll_dist = E1000_COLLISION_DISTANCE_82542; - else - coll_dist = E1000_COLLISION_DISTANCE; - tctl = E1000_READ_REG(hw, TCTL); tctl &= ~E1000_TCTL_COLD; - tctl |= coll_dist << E1000_COLD_SHIFT; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; E1000_WRITE_REG(hw, TCTL, tctl); E1000_WRITE_FLUSH(hw); @@ -3003,8 +2982,6 @@ e1000_phy_hw_reset(struct e1000_hw *hw) if (hw->mac_type < e1000_82571) msec_delay(10); - else - udelay(100); E1000_WRITE_REG(hw, CTRL, ctrl); E1000_WRITE_FLUSH(hw); @@ -3904,16 +3881,14 @@ e1000_read_eeprom(struct e1000_hw *hw, return -E1000_ERR_EEPROM; } - /* FLASH reads without acquiring the semaphore are safe */ - if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && - hw->eeprom.use_eerd == FALSE) { - switch (hw->mac_type) { - default: - /* Prepare the EEPROM for reading */ - if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) - return -E1000_ERR_EEPROM; - break; - } + /* FLASH reads without acquiring the semaphore are safe in 82573-based + * controllers. + */ + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || + (hw->mac_type != e1000_82573)) { + /* Prepare the EEPROM for reading */ + if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; } if(eeprom->use_eerd == TRUE) { @@ -6745,12 +6720,6 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) break; } - /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high. - * Need to wait for PHY configuration completion before accessing NVM - * and PHY. 
*/ - if (hw->mac_type == e1000_82573) - msec_delay(25); - return E1000_SUCCESS; } diff --git a/trunk/drivers/net/e1000/e1000_hw.h b/trunk/drivers/net/e1000/e1000_hw.h index 0b8f6f2b774b..7caa35748cea 100644 --- a/trunk/drivers/net/e1000/e1000_hw.h +++ b/trunk/drivers/net/e1000/e1000_hw.h @@ -439,7 +439,6 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); #define E1000_DEV_ID_82546GB_FIBER 0x107A #define E1000_DEV_ID_82546GB_SERDES 0x107B #define E1000_DEV_ID_82546GB_PCIE 0x108A -#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 #define E1000_DEV_ID_82547EI 0x1019 #define E1000_DEV_ID_82571EB_COPPER 0x105E #define E1000_DEV_ID_82571EB_FIBER 0x105F @@ -450,7 +449,6 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); #define E1000_DEV_ID_82573E 0x108B #define E1000_DEV_ID_82573E_IAMT 0x108C #define E1000_DEV_ID_82573L 0x109A -#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 #define NODE_ADDRESS_SIZE 6 @@ -1499,7 +1497,6 @@ struct e1000_hw { #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ #define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ -#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 #define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 @@ -1957,23 +1954,6 @@ struct e1000_host_command_info { #define E1000_MDALIGN 4096 -/* PCI-Ex registers */ - -/* PCI-Ex Control Register */ -#define E1000_GCR_RXD_NO_SNOOP 0x00000001 -#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 -#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 -#define E1000_GCR_TXD_NO_SNOOP 0x00000008 -#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 -#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 - -#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ - E1000_GCR_RXDSCW_NO_SNOOP | \ - E1000_GCR_RXDSCR_NO_SNOOP | \ - E1000_GCR TXD_NO_SNOOP | \ - E1000_GCR_TXDSCW_NO_SNOOP | \ - E1000_GCR_TXDSCR_NO_SNOOP) - #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 /* Function Active and Power State to MNG */ #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 @@ -2097,10 +2077,7 @@ struct e1000_host_command_info { /* Collision related configuration parameters */ #define E1000_COLLISION_THRESHOLD 15 #define E1000_CT_SHIFT 4 -/* Collision distance is a 0-based value that applies to - * half-duplex-capable hardware only. 
*/ -#define E1000_COLLISION_DISTANCE 63 -#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_COLLISION_DISTANCE 64 #define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE #define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE #define E1000_COLD_SHIFT 12 diff --git a/trunk/drivers/net/e1000/e1000_main.c b/trunk/drivers/net/e1000/e1000_main.c index d0a5d1656c5f..438a931fd55d 100644 --- a/trunk/drivers/net/e1000/e1000_main.c +++ b/trunk/drivers/net/e1000/e1000_main.c @@ -43,7 +43,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; #else #define DRIVERNAPI "-NAPI" #endif -#define DRV_VERSION "6.3.9-k2"DRIVERNAPI +#define DRV_VERSION "6.1.16-k2"DRIVERNAPI char e1000_driver_version[] = DRV_VERSION; static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; @@ -97,9 +97,7 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x108A), INTEL_E1000_ETHERNET_DEVICE(0x108B), INTEL_E1000_ETHERNET_DEVICE(0x108C), - INTEL_E1000_ETHERNET_DEVICE(0x1099), INTEL_E1000_ETHERNET_DEVICE(0x109A), - INTEL_E1000_ETHERNET_DEVICE(0x10B5), /* required last entry */ {0,} }; @@ -173,11 +171,9 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring); #endif static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count); + struct e1000_rx_ring *rx_ring); static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count); + struct e1000_rx_ring *rx_ring); static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); @@ -323,75 +319,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) } } } - -/** - * e1000_release_hw_control - release control of the h/w to f/w - * @adapter: address of board private structure - * - * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that the - * driver is no longer loaded. For AMT version (only with 82573) i - * of the f/w this means that the netowrk i/f is closed. - * - **/ - -static inline void -e1000_release_hw_control(struct e1000_adapter *adapter) -{ - uint32_t ctrl_ext; - uint32_t swsm; - - /* Let firmware taken over control of h/w */ - switch (adapter->hw.mac_type) { - case e1000_82571: - case e1000_82572: - ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); - E1000_WRITE_REG(&adapter->hw, CTRL_EXT, - ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); - break; - case e1000_82573: - swsm = E1000_READ_REG(&adapter->hw, SWSM); - E1000_WRITE_REG(&adapter->hw, SWSM, - swsm & ~E1000_SWSM_DRV_LOAD); - default: - break; - } -} - -/** - * e1000_get_hw_control - get control of the h/w from f/w - * @adapter: address of board private structure - * - * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. - * For ASF and Pass Through versions of f/w this means that - * the driver is loaded. For AMT version (only with 82573) - * of the f/w this means that the netowrk i/f is open. 
- * - **/ - -static inline void -e1000_get_hw_control(struct e1000_adapter *adapter) -{ - uint32_t ctrl_ext; - uint32_t swsm; - /* Let firmware know the driver has taken over */ - switch (adapter->hw.mac_type) { - case e1000_82571: - case e1000_82572: - ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); - E1000_WRITE_REG(&adapter->hw, CTRL_EXT, - ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); - break; - case e1000_82573: - swsm = E1000_READ_REG(&adapter->hw, SWSM); - E1000_WRITE_REG(&adapter->hw, SWSM, - swsm | E1000_SWSM_DRV_LOAD); - break; - default: - break; - } -} - + int e1000_up(struct e1000_adapter *adapter) { @@ -415,14 +343,8 @@ e1000_up(struct e1000_adapter *adapter) e1000_configure_tx(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); - /* call E1000_DESC_UNUSED which always leaves - * at least 1 descriptor unused to make sure - * next_to_use != next_to_clean */ - for (i = 0; i < adapter->num_rx_queues; i++) { - struct e1000_rx_ring *ring = &adapter->rx_ring[i]; - adapter->alloc_rx_buf(adapter, ring, - E1000_DESC_UNUSED(ring)); - } + for (i = 0; i < adapter->num_queues; i++) + adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]); #ifdef CONFIG_PCI_MSI if(adapter->hw.mac_type > e1000_82547_rev_2) { @@ -442,12 +364,6 @@ e1000_up(struct e1000_adapter *adapter) return err; } -#ifdef CONFIG_E1000_MQ - e1000_setup_queue_mapping(adapter); -#endif - - adapter->tx_queue_len = netdev->tx_queue_len; - mod_timer(&adapter->watchdog_timer, jiffies); #ifdef CONFIG_E1000_NAPI @@ -462,8 +378,6 @@ void e1000_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && - e1000_check_mng_mode(&adapter->hw); e1000_irq_disable(adapter); #ifdef CONFIG_E1000_MQ @@ -482,7 +396,6 @@ e1000_down(struct e1000_adapter *adapter) #ifdef CONFIG_E1000_NAPI netif_poll_disable(netdev); #endif - netdev->tx_queue_len = adapter->tx_queue_len; adapter->link_speed = 0; adapter->link_duplex = 0; netif_carrier_off(netdev); @@ -492,16 +405,12 @@ e1000_down(struct e1000_adapter *adapter) e1000_clean_all_tx_rings(adapter); e1000_clean_all_rx_rings(adapter); - /* Power down the PHY so no link is implied when interface is down * - * The PHY cannot be powered down if any of the following is TRUE * - * (a) WoL is enabled - * (b) AMT is active - * (c) SoL/IDER session is active */ - if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && + /* If WoL is not enabled and management mode is not IAMT + * Power down the PHY so no link is implied when interface is down */ + if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && adapter->hw.media_type == e1000_media_type_copper && - !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && - !mng_mode_enabled && - !e1000_check_phy_reset_block(&adapter->hw)) { + !e1000_check_mng_mode(&adapter->hw) && + !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) { uint16_t mii_reg; e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; @@ -513,8 +422,10 @@ e1000_down(struct e1000_adapter *adapter) void e1000_reset(struct e1000_adapter *adapter) { + struct net_device *netdev = adapter->netdev; uint32_t pba, manc; uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; + uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. 
@@ -538,8 +449,15 @@ e1000_reset(struct e1000_adapter *adapter) } if((adapter->hw.mac_type != e1000_82573) && - (adapter->netdev->mtu > E1000_RXBUFFER_8192)) + (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) { pba -= 8; /* allocate more FIFO for Tx */ + /* send an XOFF when there is enough space in the + * Rx FIFO to hold one extra full size Rx packet + */ + fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE + + ETHERNET_FCS_SIZE + 1; + fc_low_water_mark = fc_high_water_mark + 8; + } if(adapter->hw.mac_type == e1000_82547) { @@ -553,12 +471,10 @@ e1000_reset(struct e1000_adapter *adapter) E1000_WRITE_REG(&adapter->hw, PBA, pba); /* flow control settings */ - /* Set the FC high water mark to 90% of the FIFO size. - * Required to clear last 3 LSB */ - fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; - - adapter->hw.fc_high_water = fc_high_water_mark; - adapter->hw.fc_low_water = fc_high_water_mark - 8; + adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - + fc_high_water_mark; + adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - + fc_low_water_mark; adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; adapter->hw.fc_send_xon = 1; adapter->hw.fc = adapter->hw.original_fc; @@ -601,6 +517,8 @@ e1000_probe(struct pci_dev *pdev, struct net_device *netdev; struct e1000_adapter *adapter; unsigned long mmio_start, mmio_len; + uint32_t ctrl_ext; + uint32_t swsm; static int cards_found = 0; int i, err, pci_using_dac; @@ -794,7 +712,8 @@ e1000_probe(struct pci_dev *pdev, case e1000_82546: case e1000_82546_rev_3: case e1000_82571: - if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ + if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) + && (adapter->hw.media_type == e1000_media_type_copper)) { e1000_read_eeprom(&adapter->hw, EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; @@ -808,36 +727,25 @@ e1000_probe(struct pci_dev *pdev, if(eeprom_data & eeprom_apme_mask) adapter->wol |= E1000_WUFC_MAG; - /* print bus type/speed/width info */ - { - struct e1000_hw *hw = &adapter->hw; - DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", - ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : - (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), - ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" : - (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" : - (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : - (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" : - (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), - ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : - (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" : - (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : - "32-bit")); - } - - for (i = 0; i < 6; i++) - printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':'); - /* reset the hardware with the new settings */ e1000_reset(adapter); - /* If the controller is 82573 and f/w is AMT, do not set - * DRV_LOAD until the interface is up. For all other cases, - * let the f/w know that the h/w is now under the control - * of the driver. 
*/ - if (adapter->hw.mac_type != e1000_82573 || - !e1000_check_mng_mode(&adapter->hw)) - e1000_get_hw_control(adapter); + /* Let firmware know the driver has taken over */ + switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + break; + case e1000_82573: + swsm = E1000_READ_REG(&adapter->hw, SWSM); + E1000_WRITE_REG(&adapter->hw, SWSM, + swsm | E1000_SWSM_DRV_LOAD); + break; + default: + break; + } strcpy(netdev->name, "eth%d"); if((err = register_netdev(netdev))) @@ -874,7 +782,8 @@ e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - uint32_t manc; + uint32_t ctrl_ext; + uint32_t manc, swsm; #ifdef CONFIG_E1000_NAPI int i; #endif @@ -890,13 +799,26 @@ e1000_remove(struct pci_dev *pdev) } } - /* Release control of h/w to f/w. If f/w is AMT enabled, this - * would have already happened in close and is redundant. */ - e1000_release_hw_control(adapter); + switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + break; + case e1000_82573: + swsm = E1000_READ_REG(&adapter->hw, SWSM); + E1000_WRITE_REG(&adapter->hw, SWSM, + swsm & ~E1000_SWSM_DRV_LOAD); + break; + + default: + break; + } unregister_netdev(netdev); #ifdef CONFIG_E1000_NAPI - for (i = 0; i < adapter->num_rx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) __dev_put(&adapter->polling_netdev[i]); #endif @@ -1001,34 +923,15 @@ e1000_sw_init(struct e1000_adapter *adapter) switch (hw->mac_type) { case e1000_82571: case e1000_82572: - /* These controllers support 2 tx queues, but with a single - * qdisc implementation, multiple tx queues aren't quite as - * interesting. If we can find a logical way of mapping - * flows to a queue, then perhaps we can up the num_tx_queue - * count back to its default. Until then, we run the risk of - * terrible performance due to SACK overload. */ - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 2; + adapter->num_queues = 2; break; default: - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; + adapter->num_queues = 1; break; } - adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus()); - adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus()); - DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n", - adapter->num_rx_queues, - ((adapter->num_rx_queues == 1) - ? ((num_online_cpus() > 1) - ? 
"(due to unsupported feature in current adapter)" - : "(due to unsupported system configuration)") - : "")); - DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n", - adapter->num_tx_queues); + adapter->num_queues = min(adapter->num_queues, num_online_cpus()); #else - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; + adapter->num_queues = 1; #endif if (e1000_alloc_queues(adapter)) { @@ -1037,14 +940,17 @@ e1000_sw_init(struct e1000_adapter *adapter) } #ifdef CONFIG_E1000_NAPI - for (i = 0; i < adapter->num_rx_queues; i++) { + for (i = 0; i < adapter->num_queues; i++) { adapter->polling_netdev[i].priv = adapter; adapter->polling_netdev[i].poll = &e1000_clean; adapter->polling_netdev[i].weight = 64; dev_hold(&adapter->polling_netdev[i]); set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); } - spin_lock_init(&adapter->tx_queue_lock); +#endif + +#ifdef CONFIG_E1000_MQ + e1000_setup_queue_mapping(adapter); #endif atomic_set(&adapter->irq_sem, 1); @@ -1067,13 +973,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter) { int size; - size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; + size = sizeof(struct e1000_tx_ring) * adapter->num_queues; adapter->tx_ring = kmalloc(size, GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM; memset(adapter->tx_ring, 0, size); - size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; + size = sizeof(struct e1000_rx_ring) * adapter->num_queues; adapter->rx_ring = kmalloc(size, GFP_KERNEL); if (!adapter->rx_ring) { kfree(adapter->tx_ring); @@ -1082,7 +988,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter) memset(adapter->rx_ring, 0, size); #ifdef CONFIG_E1000_NAPI - size = sizeof(struct net_device) * adapter->num_rx_queues; + size = sizeof(struct net_device) * adapter->num_queues; adapter->polling_netdev = kmalloc(size, GFP_KERNEL); if (!adapter->polling_netdev) { kfree(adapter->tx_ring); @@ -1092,14 +998,6 @@ e1000_alloc_queues(struct e1000_adapter *adapter) memset(adapter->polling_netdev, 0, size); #endif -#ifdef CONFIG_E1000_MQ - adapter->rx_sched_call_data.func = e1000_rx_schedule; - adapter->rx_sched_call_data.info = adapter->netdev; - - adapter->cpu_netdev = alloc_percpu(struct net_device *); - adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *); -#endif - return E1000_SUCCESS; } @@ -1119,15 +1017,14 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter) lock_cpu_hotplug(); i = 0; for_each_online_cpu(cpu) { - *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues]; + *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues]; /* This is incomplete because we'd like to assign separate * physical cpus to these netdev polling structures and * avoid saturating a subset of cpus. 
*/ - if (i < adapter->num_rx_queues) { + if (i < adapter->num_queues) { *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; - adapter->rx_ring[i].cpu = cpu; - cpu_set(cpu, adapter->cpumask); + adapter->cpu_for_queue[i] = cpu; } else *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; @@ -1174,12 +1071,6 @@ e1000_open(struct net_device *netdev) e1000_update_mng_vlan(adapter); } - /* If AMT is enabled, let the firmware know that the network - * interface is now open */ - if (adapter->hw.mac_type == e1000_82573 && - e1000_check_mng_mode(&adapter->hw)) - e1000_get_hw_control(adapter); - return E1000_SUCCESS; err_up: @@ -1218,13 +1109,6 @@ e1000_close(struct net_device *netdev) E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); } - - /* If AMT is enabled, let the firmware know that the network - * interface is now closed */ - if (adapter->hw.mac_type == e1000_82573 && - e1000_check_mng_mode(&adapter->hw)) - e1000_release_hw_control(adapter); - return 0; } @@ -1345,7 +1229,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->num_tx_queues; i++) { + for (i = 0; i < adapter->num_queues; i++) { err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); if (err) { DPRINTK(PROBE, ERR, @@ -1370,11 +1254,10 @@ e1000_configure_tx(struct e1000_adapter *adapter) uint64_t tdba; struct e1000_hw *hw = &adapter->hw; uint32_t tdlen, tctl, tipg, tarc; - uint32_t ipgr1, ipgr2; /* Setup the HW Tx Head and Tail descriptor pointers */ - switch (adapter->num_tx_queues) { + switch (adapter->num_queues) { case 2: tdba = adapter->tx_ring[1].dma; tdlen = adapter->tx_ring[1].count * @@ -1404,26 +1287,22 @@ e1000_configure_tx(struct e1000_adapter *adapter) /* Set the default values for the Tx Inter Packet Gap timer */ - if (hw->media_type == e1000_media_type_fiber || - hw->media_type == e1000_media_type_internal_serdes) - tipg = DEFAULT_82543_TIPG_IPGT_FIBER; - else - tipg = DEFAULT_82543_TIPG_IPGT_COPPER; - switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: tipg = DEFAULT_82542_TIPG_IPGT; - ipgr1 = DEFAULT_82542_TIPG_IPGR1; - ipgr2 = DEFAULT_82542_TIPG_IPGR2; + tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; default: - ipgr1 = DEFAULT_82543_TIPG_IPGR1; - ipgr2 = DEFAULT_82543_TIPG_IPGR2; - break; + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; } - tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; - tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; E1000_WRITE_REG(hw, TIPG, tipg); /* Set the Tx Interrupt Delay register */ @@ -1575,8 +1454,6 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, rxdr->next_to_clean = 0; rxdr->next_to_use = 0; - rxdr->rx_skb_top = NULL; - rxdr->rx_skb_prev = NULL; return 0; } @@ -1598,7 +1475,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->num_rx_queues; i++) { + for (i = 0; i < adapter->num_queues; i++) { err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); if (err) { DPRINTK(PROBE, ERR, @@ -1633,10 +1510,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (adapter->hw.mc_filter_type << 
E1000_RCTL_MO_SHIFT); - if (adapter->hw.mac_type > e1000_82543) - rctl |= E1000_RCTL_SECRC; - - if (adapter->hw.tbi_compatibility_on == 1) + if(adapter->hw.tbi_compatibility_on == 1) rctl |= E1000_RCTL_SBP; else rctl &= ~E1000_RCTL_SBP; @@ -1764,21 +1638,16 @@ e1000_configure_rx(struct e1000_adapter *adapter) } if (hw->mac_type >= e1000_82571) { - ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); /* Reset delay timers after every interrupt */ + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_CANC; -#ifdef CONFIG_E1000_NAPI - /* Auto-Mask interrupts upon ICR read. */ - ctrl_ext |= E1000_CTRL_EXT_IAME; -#endif E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); - E1000_WRITE_REG(hw, IAM, ~0); E1000_WRITE_FLUSH(hw); } /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - switch (adapter->num_rx_queues) { + switch (adapter->num_queues) { #ifdef CONFIG_E1000_MQ case 2: rdba = adapter->rx_ring[1].dma; @@ -1805,7 +1674,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) } #ifdef CONFIG_E1000_MQ - if (adapter->num_rx_queues > 1) { + if (adapter->num_queues > 1) { uint32_t random[10]; get_random_bytes(&random[0], 40); @@ -1815,7 +1684,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) E1000_WRITE_REG(hw, RSSIM, 0); } - switch (adapter->num_rx_queues) { + switch (adapter->num_queues) { case 2: default: reta = 0x00800080; @@ -1907,7 +1776,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter) { int i; - for (i = 0; i < adapter->num_tx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); } @@ -1920,10 +1789,12 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, buffer_info->dma, buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; } - if (buffer_info->skb) + if(buffer_info->skb) { dev_kfree_skb_any(buffer_info->skb); - memset(buffer_info, 0, sizeof(struct e1000_buffer)); + buffer_info->skb = NULL; + } } /** @@ -1972,7 +1843,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter) { int i; - for (i = 0; i < adapter->num_tx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); } @@ -2016,7 +1887,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter) { int i; - for (i = 0; i < adapter->num_rx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); } @@ -2042,6 +1913,8 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, for(i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if(buffer_info->skb) { + ps_page = &rx_ring->ps_page[i]; + ps_page_dma = &rx_ring->ps_page_dma[i]; pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, @@ -2049,30 +1922,19 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; - } - ps_page = &rx_ring->ps_page[i]; - ps_page_dma = &rx_ring->ps_page_dma[i]; - for (j = 0; j < adapter->rx_ps_pages; j++) { - if (!ps_page->ps_page[j]) break; - pci_unmap_page(pdev, - ps_page_dma->ps_page_dma[j], - PAGE_SIZE, PCI_DMA_FROMDEVICE); - ps_page_dma->ps_page_dma[j] = 0; - put_page(ps_page->ps_page[j]); - ps_page->ps_page[j] = NULL; - } - } - - /* there also may be some cached data in our adapter */ - if (rx_ring->rx_skb_top) { - dev_kfree_skb(rx_ring->rx_skb_top); - /* rx_skb_prev will be wiped out by rx_skb_top */ - rx_ring->rx_skb_top = NULL; - rx_ring->rx_skb_prev = NULL; + for(j = 0; j < adapter->rx_ps_pages; j++) { + 
if(!ps_page->ps_page[j]) break; + pci_unmap_single(pdev, + ps_page_dma->ps_page_dma[j], + PAGE_SIZE, PCI_DMA_FROMDEVICE); + ps_page_dma->ps_page_dma[j] = 0; + put_page(ps_page->ps_page[j]); + ps_page->ps_page[j] = NULL; + } + } } - size = sizeof(struct e1000_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); size = sizeof(struct e1000_ps_page) * rx_ring->count; @@ -2101,7 +1963,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter) { int i; - for (i = 0; i < adapter->num_rx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); } @@ -2143,9 +2005,7 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter) if(netif_running(netdev)) { e1000_configure_rx(adapter); - /* No need to loop, because 82542 supports only 1 queue */ - struct e1000_rx_ring *ring = &adapter->rx_ring[0]; - adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]); } } @@ -2344,7 +2204,7 @@ static void e1000_watchdog_task(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_tx_ring *txdr = &adapter->tx_ring[0]; uint32_t link; e1000_check_for_link(&adapter->hw); @@ -2371,21 +2231,6 @@ e1000_watchdog_task(struct e1000_adapter *adapter) adapter->link_duplex == FULL_DUPLEX ? "Full Duplex" : "Half Duplex"); - /* tweak tx_queue_len according to speed/duplex */ - netdev->tx_queue_len = adapter->tx_queue_len; - adapter->tx_timeout_factor = 1; - if (adapter->link_duplex == HALF_DUPLEX) { - switch (adapter->link_speed) { - case SPEED_10: - netdev->tx_queue_len = 10; - adapter->tx_timeout_factor = 8; - break; - case SPEED_100: - netdev->tx_queue_len = 100; - break; - } - } - netif_carrier_on(netdev); netif_wake_queue(netdev); mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); @@ -2418,10 +2263,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter) e1000_update_adaptive(&adapter->hw); -#ifdef CONFIG_E1000_MQ - txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id()); -#endif - if (!netif_carrier_ok(netdev)) { + if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) { if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going @@ -2472,7 +2314,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, { #ifdef NETIF_F_TSO struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; unsigned int i; uint32_t cmd_length = 0; uint16_t ipcse = 0, tucse, mss; @@ -2522,7 +2363,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, i = tx_ring->next_to_use; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); - buffer_info = &tx_ring->buffer_info[i]; context_desc->lower_setup.ip_fields.ipcss = ipcss; context_desc->lower_setup.ip_fields.ipcso = ipcso; @@ -2534,16 +2374,14 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); - buffer_info->time_stamp = jiffies; - if (++i == tx_ring->count) i = 0; tx_ring->next_to_use = i; - return TRUE; + return 1; } #endif - return FALSE; + return 0; } static inline boolean_t @@ -2551,7 +2389,6 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, struct sk_buff *skb) { struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; unsigned int i; uint8_t css; @@ -2559,7 
+2396,6 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, css = skb->h.raw - skb->data; i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc->upper_setup.tcp_fields.tucss = css; @@ -2568,8 +2404,6 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); - buffer_info->time_stamp = jiffies; - if (unlikely(++i == tx_ring->count)) i = 0; tx_ring->next_to_use = i; @@ -2854,30 +2688,11 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * overrun the FIFO, adjust the max buffer len if mss * drops. */ if(mss) { - uint8_t hdr_len; max_per_txd = min(mss << 2, max_per_txd); max_txd_pwr = fls(max_per_txd) - 1; - - /* TSO Workaround for 82571/2 Controllers -- if skb->data - * points to just header, pull a few bytes of payload from - * frags into skb->data */ - hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); - if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) && - (adapter->hw.mac_type == e1000_82571 || - adapter->hw.mac_type == e1000_82572)) { - unsigned int pull_size; - pull_size = min((unsigned int)4, skb->data_len); - if (!__pskb_pull_tail(skb, pull_size)) { - printk(KERN_ERR "__pskb_pull_tail failed.\n"); - dev_kfree_skb_any(skb); - return -EFAULT; - } - len = skb->len - skb->data_len; - } } if((mss) || (skb->ip_summed == CHECKSUM_HW)) - /* reserve a descriptor for the offload context */ count++; count++; #else @@ -2911,6 +2726,27 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if(adapter->pcix_82544) count += nr_frags; +#ifdef NETIF_F_TSO + /* TSO Workaround for 82571/2 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data */ + if (skb_shinfo(skb)->tso_size) { + uint8_t hdr_len; + hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) && + (adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572)) { + unsigned int pull_size; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + printk(KERN_ERR "__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return -EFAULT; + } + } + } +#endif + if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) e1000_transfer_dhcp_info(adapter, skb); @@ -2997,7 +2833,6 @@ e1000_tx_timeout_task(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); - adapter->tx_timeout_count++; e1000_down(adapter); e1000_up(adapter); } @@ -3015,7 +2850,7 @@ e1000_get_stats(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); - /* only return the current stats */ + e1000_update_stats(adapter); return &adapter->net_stats; } @@ -3036,50 +2871,49 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); - return -EINVAL; + return -EINVAL; } - /* Adapter-specific max frame size limits. 
*/ - switch (adapter->hw.mac_type) { - case e1000_82542_rev2_0: - case e1000_82542_rev2_1: - case e1000_82573: - if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { - DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); - return -EINVAL; - } - break; - case e1000_82571: - case e1000_82572: #define MAX_STD_JUMBO_FRAME_SIZE 9234 - if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { - DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); - return -EINVAL; - } - break; - default: - /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ - break; + /* might want this to be bigger enum check... */ + /* 82571 controllers limit jumbo frame size to 10500 bytes */ + if ((adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572) && + max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported " + "on 82571 and 82572 controllers.\n"); + return -EINVAL; + } + + if(adapter->hw.mac_type == e1000_82573 && + max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " + "on 82573\n"); + return -EINVAL; } - /* since the driver code now supports splitting a packet across - * multiple descriptors, most of the fifo related limitations on - * jumbo frame traffic have gone away. - * simply use 2k descriptors for everything. - * - * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN - * means we reserve 2 more, this pushes us to allocate from the next - * larger slab size - * i.e. RXBUFFER_2048 --> size-4096 slab */ - - /* recent hardware supports 1KB granularity */ - if (adapter->hw.mac_type > e1000_82547_rev_2) { - adapter->rx_buffer_len = - ((max_frame < E1000_RXBUFFER_2048) ? - max_frame : E1000_RXBUFFER_2048); + if(adapter->hw.mac_type > e1000_82547_rev_2) { + adapter->rx_buffer_len = max_frame; E1000_ROUNDUP(adapter->rx_buffer_len, 1024); - } else - adapter->rx_buffer_len = E1000_RXBUFFER_2048; + } else { + if(unlikely((adapter->hw.mac_type < e1000_82543) && + (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " + "on 82542\n"); + return -EINVAL; + + } else { + if(max_frame <= E1000_RXBUFFER_2048) { + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + } else if(max_frame <= E1000_RXBUFFER_4096) { + adapter->rx_buffer_len = E1000_RXBUFFER_4096; + } else if(max_frame <= E1000_RXBUFFER_8192) { + adapter->rx_buffer_len = E1000_RXBUFFER_8192; + } else if(max_frame <= E1000_RXBUFFER_16384) { + adapter->rx_buffer_len = E1000_RXBUFFER_16384; + } + } + } netdev->mtu = new_mtu; @@ -3203,11 +3037,12 @@ e1000_update_stats(struct e1000_adapter *adapter) adapter->net_stats.rx_errors = adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.rlec + adapter->stats.cexterr; - adapter->net_stats.rx_dropped = 0; + adapter->stats.rlec + adapter->stats.mpc + + adapter->stats.cexterr; adapter->net_stats.rx_length_errors = adapter->stats.rlec; adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; + adapter->net_stats.rx_fifo_errors = adapter->stats.mpc; adapter->net_stats.rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ @@ -3275,24 +3110,12 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; uint32_t icr = E1000_READ_REG(hw, ICR); -#ifndef CONFIG_E1000_NAPI +#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI) int i; -#else - /* Interrupt Auto-Mask...upon 
reading ICR, - * interrupts are masked. No need for the - * IMC write, but it does mean we should - * account for it ASAP. */ - if (likely(hw->mac_type >= e1000_82571)) - atomic_inc(&adapter->irq_sem); #endif - if (unlikely(!icr)) { -#ifdef CONFIG_E1000_NAPI - if (hw->mac_type >= e1000_82571) - e1000_irq_enable(adapter); -#endif + if(unlikely(!icr)) return IRQ_NONE; /* Not our interrupt */ - } if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { hw->get_link_status = 1; @@ -3300,19 +3123,19 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) } #ifdef CONFIG_E1000_NAPI - if (unlikely(hw->mac_type < e1000_82571)) { - atomic_inc(&adapter->irq_sem); - E1000_WRITE_REG(hw, IMC, ~0); - E1000_WRITE_FLUSH(hw); - } + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(hw, IMC, ~0); + E1000_WRITE_FLUSH(hw); #ifdef CONFIG_E1000_MQ if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { - /* We must setup the cpumask once count == 0 since - * each cpu bit is cleared when the work is done. */ - adapter->rx_sched_call_data.cpumask = adapter->cpumask; - atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem); - atomic_set(&adapter->rx_sched_call_data.count, - adapter->num_rx_queues); + cpu_set(adapter->cpu_for_queue[0], + adapter->rx_sched_call_data.cpumask); + for (i = 1; i < adapter->num_queues; i++) { + cpu_set(adapter->cpu_for_queue[i], + adapter->rx_sched_call_data.cpumask); + atomic_inc(&adapter->irq_sem); + } + atomic_set(&adapter->rx_sched_call_data.count, i); smp_call_async_mask(&adapter->rx_sched_call_data); } else { printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); @@ -3364,7 +3187,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) { struct e1000_adapter *adapter; int work_to_do = min(*budget, poll_dev->quota); - int tx_cleaned = 0, i = 0, work_done = 0; + int tx_cleaned, i = 0, work_done = 0; /* Must NOT use netdev_priv macro here. */ adapter = poll_dev->priv; @@ -3375,23 +3198,11 @@ e1000_clean(struct net_device *poll_dev, int *budget) while (poll_dev != &adapter->polling_netdev[i]) { i++; - if (unlikely(i == adapter->num_rx_queues)) + if (unlikely(i == adapter->num_queues)) BUG(); } - if (likely(adapter->num_tx_queues == 1)) { - /* e1000_clean is called per-cpu. This lock protects - * tx_ring[0] from being cleaned by multiple cpus - * simultaneously. A failure obtaining the lock means - * tx_ring[0] is currently being cleaned anyway. 
*/ - if (spin_trylock(&adapter->tx_queue_lock)) { - tx_cleaned = e1000_clean_tx_irq(adapter, - &adapter->tx_ring[0]); - spin_unlock(&adapter->tx_queue_lock); - } - } else - tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); - + tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); adapter->clean_rx(adapter, &adapter->rx_ring[i], &work_done, work_to_do); @@ -3436,19 +3247,17 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, buffer_info = &tx_ring->buffer_info[i]; cleaned = (i == eop); -#ifdef CONFIG_E1000_MQ - tx_ring->tx_stats.bytes += buffer_info->length; -#endif e1000_unmap_and_free_tx_resource(adapter, buffer_info); - memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); + + tx_desc->buffer_addr = 0; + tx_desc->lower.data = 0; + tx_desc->upper.data = 0; if(unlikely(++i == tx_ring->count)) i = 0; } -#ifdef CONFIG_E1000_MQ - tx_ring->tx_stats.packets++; -#endif - + tx_ring->pkt++; + eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); } @@ -3467,31 +3276,32 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ adapter->detect_tx_hung = FALSE; - if (tx_ring->buffer_info[eop].dma && - time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + - adapter->tx_timeout_factor * HZ) + if (tx_ring->buffer_info[i].dma && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) && !(E1000_READ_REG(&adapter->hw, STATUS) & - E1000_STATUS_TXOFF)) { + E1000_STATUS_TXOFF)) { /* detected Tx unit hang */ + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" - " Tx Queue <%lu>\n" " TDH <%x>\n" " TDT <%x>\n" " next_to_use <%x>\n" " next_to_clean <%x>\n" "buffer_info[next_to_clean]\n" + " dma <%llx>\n" " time_stamp <%lx>\n" " next_to_watch <%x>\n" " jiffies <%lx>\n" " next_to_watch.status <%x>\n", - (unsigned long)((tx_ring - adapter->tx_ring) / - sizeof(struct e1000_tx_ring)), readl(adapter->hw.hw_addr + tx_ring->tdh), readl(adapter->hw.hw_addr + tx_ring->tdt), tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, + i, + (unsigned long long)tx_ring->buffer_info[i].dma, + tx_ring->buffer_info[i].time_stamp, eop, jiffies, eop_desc->upper.fields.status); @@ -3576,23 +3386,20 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, uint32_t length; uint8_t last_byte; unsigned int i; - int cleaned_count = 0; - boolean_t cleaned = FALSE, multi_descriptor = FALSE; + boolean_t cleaned = FALSE; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); while(rx_desc->status & E1000_RXD_STAT_DD) { buffer_info = &rx_ring->buffer_info[i]; - u8 status; #ifdef CONFIG_E1000_NAPI if(*work_done >= work_to_do) break; (*work_done)++; #endif - status = rx_desc->status; cleaned = TRUE; - cleaned_count++; + pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, @@ -3626,40 +3433,18 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, } } - /* code added for copybreak, this should improve - * performance for small packets with large amounts - * of reassembly being done in the stack */ -#define E1000_CB_LENGTH 256 - if ((length < E1000_CB_LENGTH) && - !rx_ring->rx_skb_top && - /* or maybe (status & E1000_RXD_STAT_EOP) && */ - !multi_descriptor) { - struct sk_buff *new_skb = - dev_alloc_skb(length + NET_IP_ALIGN); - if (new_skb) { - skb_reserve(new_skb, NET_IP_ALIGN); - new_skb->dev = netdev; - 
memcpy(new_skb->data - NET_IP_ALIGN, - skb->data - NET_IP_ALIGN, - length + NET_IP_ALIGN); - /* save the skb in buffer_info as good */ - buffer_info->skb = skb; - skb = new_skb; - skb_put(skb, length); - } - } - - /* end copybreak code */ + /* Good Receive */ + skb_put(skb, length - ETHERNET_FCS_SIZE); /* Receive Checksum Offload */ e1000_rx_checksum(adapter, - (uint32_t)(status) | + (uint32_t)(rx_desc->status) | ((uint32_t)(rx_desc->errors) << 24), rx_desc->csum, skb); skb->protocol = eth_type_trans(skb, netdev); #ifdef CONFIG_E1000_NAPI if(unlikely(adapter->vlgrp && - (status & E1000_RXD_STAT_VP))) { + (rx_desc->status & E1000_RXD_STAT_VP))) { vlan_hwaccel_receive_skb(skb, adapter->vlgrp, le16_to_cpu(rx_desc->special) & E1000_RXD_SPC_VLAN_MASK); @@ -3677,26 +3462,17 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, } #endif /* CONFIG_E1000_NAPI */ netdev->last_rx = jiffies; -#ifdef CONFIG_E1000_MQ - rx_ring->rx_stats.packets++; - rx_ring->rx_stats.bytes += length; -#endif + rx_ring->pkt++; next_desc: rx_desc->status = 0; + buffer_info->skb = NULL; + if(unlikely(++i == rx_ring->count)) i = 0; - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - cleaned_count = 0; - } - + rx_desc = E1000_RX_DESC(*rx_ring, i); } rx_ring->next_to_clean = i; - - cleaned_count = E1000_DESC_UNUSED(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + adapter->alloc_rx_buf(adapter, rx_ring); return cleaned; } @@ -3725,7 +3501,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, struct sk_buff *skb; unsigned int i, j; uint32_t length, staterr; - int cleaned_count = 0; boolean_t cleaned = FALSE; i = rx_ring->next_to_clean; @@ -3742,7 +3517,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, (*work_done)++; #endif cleaned = TRUE; - cleaned_count++; pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_FROMDEVICE); @@ -3819,28 +3593,18 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, } #endif /* CONFIG_E1000_NAPI */ netdev->last_rx = jiffies; -#ifdef CONFIG_E1000_MQ - rx_ring->rx_stats.packets++; - rx_ring->rx_stats.bytes += length; -#endif + rx_ring->pkt++; next_desc: rx_desc->wb.middle.status_error &= ~0xFF; buffer_info->skb = NULL; + if(unlikely(++i == rx_ring->count)) i = 0; - /* return some buffers to hardware, one at a time is too slow */ - if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); - cleaned_count = 0; - } - + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.middle.status_error); } rx_ring->next_to_clean = i; - - cleaned_count = E1000_DESC_UNUSED(rx_ring); - if (cleaned_count) - adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + adapter->alloc_rx_buf(adapter, rx_ring); return cleaned; } @@ -3852,8 +3616,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count) + struct e1000_rx_ring *rx_ring) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; @@ -3866,18 +3629,11 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; - while (cleaned_count--) { - if (!(skb = buffer_info->skb)) - skb = dev_alloc_skb(bufsz); - else { - skb_trim(skb, 0); - goto map_skb; - } - + while(!buffer_info->skb) { + 
skb = dev_alloc_skb(bufsz); if(unlikely(!skb)) { /* Better luck next round */ - adapter->alloc_rx_buff_failed++; break; } @@ -3914,7 +3670,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, buffer_info->skb = skb; buffer_info->length = adapter->rx_buffer_len; -map_skb: buffer_info->dma = pci_map_single(pdev, skb->data, adapter->rx_buffer_len, @@ -3963,8 +3718,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, - struct e1000_rx_ring *rx_ring, - int cleaned_count) + struct e1000_rx_ring *rx_ring) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; @@ -3980,7 +3734,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, ps_page = &rx_ring->ps_page[i]; ps_page_dma = &rx_ring->ps_page_dma[i]; - while (cleaned_count--) { + while(!buffer_info->skb) { rx_desc = E1000_RX_DESC_PS(*rx_ring, i); for(j = 0; j < PS_PAGE_BUFFERS; j++) { @@ -4352,12 +4106,8 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) if((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && - (vid == adapter->mng_vlan_id)) { - /* release control to f/w */ - e1000_release_hw_control(adapter); + (vid == adapter->mng_vlan_id)) return; - } - /* remove VID from filter table */ index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); @@ -4423,9 +4173,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - uint32_t ctrl, ctrl_ext, rctl, manc, status; + uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; uint32_t wufc = adapter->wol; - int retval = 0; netif_device_detach(netdev); @@ -4471,21 +4220,13 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, WUFC, wufc); - retval = pci_enable_wake(pdev, PCI_D3hot, 1); - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); - retval = pci_enable_wake(pdev, PCI_D3cold, 1); - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); + pci_enable_wake(pdev, 3, 1); + pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ } else { E1000_WRITE_REG(&adapter->hw, WUC, 0); E1000_WRITE_REG(&adapter->hw, WUFC, 0); - retval = pci_enable_wake(pdev, PCI_D3hot, 0); - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); - retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); + pci_enable_wake(pdev, 3, 0); + pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ } pci_save_state(pdev); @@ -4496,24 +4237,29 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) if(manc & E1000_MANC_SMBUS_EN) { manc |= E1000_MANC_ARP_EN; E1000_WRITE_REG(&adapter->hw, MANC, manc); - retval = pci_enable_wake(pdev, PCI_D3hot, 1); - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); - retval = pci_enable_wake(pdev, PCI_D3cold, 1); - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); + pci_enable_wake(pdev, 3, 1); + pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ } } - /* Release control of h/w to f/w. If f/w is AMT enabled, this - * would have already happened in close and is redundant. 
*/ - e1000_release_hw_control(adapter); + switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + break; + case e1000_82573: + swsm = E1000_READ_REG(&adapter->hw, SWSM); + E1000_WRITE_REG(&adapter->hw, SWSM, + swsm & ~E1000_SWSM_DRV_LOAD); + break; + default: + break; + } pci_disable_device(pdev); - - retval = pci_set_power_state(pdev, pci_choose_state(pdev, state)); - if (retval) - DPRINTK(PROBE, ERR, "Error in setting power state\n"); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } @@ -4523,21 +4269,16 @@ e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); - int retval; - uint32_t manc, ret_val; + uint32_t manc, ret_val, swsm; + uint32_t ctrl_ext; - retval = pci_set_power_state(pdev, PCI_D0); - if (retval) - DPRINTK(PROBE, ERR, "Error in setting power state\n"); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); ret_val = pci_enable_device(pdev); pci_set_master(pdev); - retval = pci_enable_wake(pdev, PCI_D3hot, 0); - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 wake\n"); - retval = pci_enable_wake(pdev, PCI_D3cold, 0); - if (retval) - DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n"); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); e1000_reset(adapter); E1000_WRITE_REG(&adapter->hw, WUS, ~0); @@ -4554,13 +4295,21 @@ e1000_resume(struct pci_dev *pdev) E1000_WRITE_REG(&adapter->hw, MANC, manc); } - /* If the controller is 82573 and f/w is AMT, do not set - * DRV_LOAD until the interface is up. For all other cases, - * let the f/w know that the h/w is now under the control - * of the driver. 
*/ - if (adapter->hw.mac_type != e1000_82573 || - !e1000_check_mng_mode(&adapter->hw)) - e1000_get_hw_control(adapter); + switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + break; + case e1000_82573: + swsm = E1000_READ_REG(&adapter->hw, SWSM); + E1000_WRITE_REG(&adapter->hw, SWSM, + swsm | E1000_SWSM_DRV_LOAD); + break; + default: + break; + } return 0; } @@ -4578,9 +4327,6 @@ e1000_netpoll(struct net_device *netdev) disable_irq(adapter->pdev->irq); e1000_intr(adapter->pdev->irq, netdev, NULL); e1000_clean_tx_irq(adapter, adapter->tx_ring); -#ifndef CONFIG_E1000_NAPI - adapter->clean_rx(adapter, adapter->rx_ring); -#endif enable_irq(adapter->pdev->irq); } #endif diff --git a/trunk/drivers/net/e1000/e1000_param.c b/trunk/drivers/net/e1000/e1000_param.c index 0a7918c62557..ccbbe5ad8e0f 100644 --- a/trunk/drivers/net/e1000/e1000_param.c +++ b/trunk/drivers/net/e1000/e1000_param.c @@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); * * Valid Range: 100-100000 (0=off, 1=dynamic) * - * Default Value: 8000 + * Default Value: 1 */ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); @@ -320,7 +320,7 @@ e1000_check_options(struct e1000_adapter *adapter) } else { tx_ring->count = opt.def; } - for (i = 0; i < adapter->num_tx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) tx_ring[i].count = tx_ring->count; } { /* Receive Descriptor Count */ @@ -346,7 +346,7 @@ e1000_check_options(struct e1000_adapter *adapter) } else { rx_ring->count = opt.def; } - for (i = 0; i < adapter->num_rx_queues; i++) + for (i = 0; i < adapter->num_queues; i++) rx_ring[i].count = rx_ring->count; } { /* Checksum Offload Enable/Disable */ @@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter) e1000_validate_option(&fc, &opt, adapter); adapter->hw.fc = adapter->hw.original_fc = fc; } else { - adapter->hw.fc = adapter->hw.original_fc = opt.def; + adapter->hw.fc = opt.def; } } { /* Transmit Interrupt Delay */ @@ -584,12 +584,6 @@ e1000_check_copper_options(struct e1000_adapter *adapter) .p = dplx_list }} }; - if (e1000_check_phy_reset_block(&adapter->hw)) { - DPRINTK(PROBE, INFO, - "Link active due to SoL/IDER Session. 
" - "Speed/Duplex/AutoNeg parameter ignored.\n"); - return; - } if (num_Duplex > bd) { dplx = Duplex[bd]; e1000_validate_option(&dplx, &opt, adapter); diff --git a/trunk/drivers/net/mv643xx_eth.c b/trunk/drivers/net/mv643xx_eth.c index 40ae36b20c9d..22c3a37bba5a 100644 --- a/trunk/drivers/net/mv643xx_eth.c +++ b/trunk/drivers/net/mv643xx_eth.c @@ -35,8 +35,6 @@ #include #include #include -#include -#include #include #include @@ -57,15 +55,13 @@ /* Constants */ #define VLAN_HLEN 4 #define FCS_LEN 4 -#define DMA_ALIGN 8 /* hw requires 8-byte alignment */ -#define HW_IP_ALIGN 2 /* hw aligns IP header */ -#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN +#define WRAP NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN #define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) -#define INT_UNMASK_ALL 0x0007ffff -#define INT_UNMASK_ALL_EXT 0x0011ffff -#define INT_MASK_ALL 0x00000000 -#define INT_MASK_ALL_EXT 0x00000000 +#define INT_CAUSE_UNMASK_ALL 0x0007ffff +#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff +#define INT_CAUSE_MASK_ALL 0x00000000 +#define INT_CAUSE_MASK_ALL_EXT 0x00000000 #define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL #define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT @@ -82,9 +78,8 @@ static int eth_port_link_is_up(unsigned int eth_port_num); static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *MacAddr); -static void eth_port_set_multicast_list(struct net_device *); -static int mv643xx_eth_open(struct net_device *); -static int mv643xx_eth_stop(struct net_device *); +static int mv643xx_eth_real_open(struct net_device *); +static int mv643xx_eth_real_stop(struct net_device *); static int mv643xx_eth_change_mtu(struct net_device *, int); static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); static void eth_port_init_mac_tables(unsigned int eth_port_num); @@ -129,8 +124,15 @@ static inline void mv_write(int offset, u32 data) */ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) { - if ((new_mtu > 9500) || (new_mtu < 64)) + struct mv643xx_private *mp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&mp->lock, flags); + + if ((new_mtu > 9500) || (new_mtu < 64)) { + spin_unlock_irqrestore(&mp->lock, flags); return -EINVAL; + } dev->mtu = new_mtu; /* @@ -140,13 +142,17 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) * to memory is full, which might fail the open function. 
*/ if (netif_running(dev)) { - mv643xx_eth_stop(dev); - if (mv643xx_eth_open(dev)) + if (mv643xx_eth_real_stop(dev)) + printk(KERN_ERR + "%s: Fatal error on stopping device\n", + dev->name); + if (mv643xx_eth_real_open(dev)) printk(KERN_ERR "%s: Fatal error on opening device\n", dev->name); } + spin_unlock_irqrestore(&mp->lock, flags); return 0; } @@ -164,19 +170,15 @@ static void mv643xx_eth_rx_task(void *data) struct mv643xx_private *mp = netdev_priv(dev); struct pkt_info pkt_info; struct sk_buff *skb; - int unaligned; if (test_and_set_bit(0, &mp->rx_task_busy)) panic("%s: Error in test_set_bit / clear_bit", dev->name); while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { - skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN); + skb = dev_alloc_skb(RX_SKB_SIZE); if (!skb) break; mp->rx_ring_skbs++; - unaligned = (u32)skb->data & (DMA_ALIGN - 1); - if (unaligned) - skb_reserve(skb, DMA_ALIGN - unaligned); pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; pkt_info.byte_cnt = RX_SKB_SIZE; pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, @@ -187,7 +189,7 @@ static void mv643xx_eth_rx_task(void *data) "%s: Error allocating RX Ring\n", dev->name); break; } - skb_reserve(skb, HW_IP_ALIGN); + skb_reserve(skb, 2); } clear_bit(0, &mp->rx_task_busy); /* @@ -205,7 +207,7 @@ static void mv643xx_eth_rx_task(void *data) else { /* Return interrupts */ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), - INT_UNMASK_ALL); + INT_CAUSE_UNMASK_ALL); } #endif } @@ -265,8 +267,6 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev) mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config); - - eth_port_set_multicast_list(dev); } /* @@ -342,6 +342,8 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev, if (!(eth_int_cause_ext & (BIT0 | BIT8))) return released; + spin_lock(&mp->lock); + /* Check only queue 0 */ while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { if (pkt_info.cmd_sts & BIT0) { @@ -349,21 +351,31 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev, stats->tx_errors++; } - if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) - dma_unmap_single(NULL, pkt_info.buf_ptr, - pkt_info.byte_cnt, - DMA_TO_DEVICE); - else - dma_unmap_page(NULL, pkt_info.buf_ptr, - pkt_info.byte_cnt, - DMA_TO_DEVICE); - + /* + * If return_info is different than 0, release the skb. + * The case where return_info is not 0 is only in case + * when transmitted a scatter/gather packet, where only + * last skb releases the whole chain. 
+ */ if (pkt_info.return_info) { + if (skb_shinfo(pkt_info.return_info)->nr_frags) + dma_unmap_page(NULL, pkt_info.buf_ptr, + pkt_info.byte_cnt, + DMA_TO_DEVICE); + else + dma_unmap_single(NULL, pkt_info.buf_ptr, + pkt_info.byte_cnt, + DMA_TO_DEVICE); + dev_kfree_skb_irq(pkt_info.return_info); released = 0; - } + } else + dma_unmap_page(NULL, pkt_info.buf_ptr, + pkt_info.byte_cnt, DMA_TO_DEVICE); } + spin_unlock(&mp->lock); + return released; } @@ -470,12 +482,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id, /* Read interrupt cause registers */ eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & - INT_UNMASK_ALL; + INT_CAUSE_UNMASK_ALL; if (eth_int_cause & BIT1) eth_int_cause_ext = mv_read( MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & - INT_UNMASK_ALL_EXT; + INT_CAUSE_UNMASK_ALL_EXT; #ifdef MV643XX_NAPI if (!(eth_int_cause & 0x0007fffd)) { @@ -500,10 +512,9 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id, } else { if (netif_rx_schedule_prep(dev)) { /* Mask all the interrupts */ - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), - INT_MASK_ALL); - /* wait for previous write to complete */ - mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); + mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); + mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG + (port_num), 0); __netif_rx_schedule(dev); } #else @@ -516,12 +527,9 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id, * with skb's. */ #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK - /* Mask all interrupts on ethernet port */ + /* Unmask all interrupts on ethernet port */ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), - INT_MASK_ALL); - /* wait for previous write to take effect */ - mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); - + INT_CAUSE_MASK_ALL); queue_task(&mp->rx_task, &tq_immediate); mark_bh(IMMEDIATE_BH); #else @@ -627,6 +635,56 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num, return coal; } +/* + * mv643xx_eth_open + * + * This function is called when openning the network device. The function + * should initialize all the hardware, initialize cyclic Rx/Tx + * descriptors chain and buffers and allocate an IRQ to the network + * device. + * + * Input : a pointer to the network device structure + * + * Output : zero of success , nonzero if fails. + */ + +static int mv643xx_eth_open(struct net_device *dev) +{ + struct mv643xx_private *mp = netdev_priv(dev); + unsigned int port_num = mp->port_num; + int err; + + spin_lock_irq(&mp->lock); + + err = request_irq(dev->irq, mv643xx_eth_int_handler, + SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); + + if (err) { + printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n", + port_num); + err = -EAGAIN; + goto out; + } + + if (mv643xx_eth_real_open(dev)) { + printk("%s: Error opening interface\n", dev->name); + err = -EBUSY; + goto out_free; + } + + spin_unlock_irq(&mp->lock); + + return 0; + +out_free: + free_irq(dev->irq, dev); + +out: + spin_unlock_irq(&mp->lock); + + return err; +} + /* * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory. * @@ -719,37 +777,28 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp) mp->port_tx_queue_command |= 1; } -/* - * mv643xx_eth_open - * - * This function is called when openning the network device. The function - * should initialize all the hardware, initialize cyclic Rx/Tx - * descriptors chain and buffers and allocate an IRQ to the network - * device. 
- * - * Input : a pointer to the network device structure - * - * Output : zero of success , nonzero if fails. - */ - -static int mv643xx_eth_open(struct net_device *dev) +/* Helper function for mv643xx_eth_open */ +static int mv643xx_eth_real_open(struct net_device *dev) { struct mv643xx_private *mp = netdev_priv(dev); unsigned int port_num = mp->port_num; unsigned int size; - int err; - - err = request_irq(dev->irq, mv643xx_eth_int_handler, - SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); - if (err) { - printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n", - port_num); - return -EAGAIN; - } /* Stop RX Queues */ mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); + /* Clear the ethernet port interrupts */ + mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); + mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); + + /* Unmask RX buffer and TX end interrupt */ + mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL); + + /* Unmask phy and link status changes interrupts */ + mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL_EXT); + /* Set the MAC Address */ memcpy(mp->port_mac_addr, dev->dev_addr, 6); @@ -769,15 +818,14 @@ static int mv643xx_eth_open(struct net_device *dev) GFP_KERNEL); if (!mp->rx_skb) { printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name); - err = -ENOMEM; - goto out_free_irq; + return -ENOMEM; } mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size, GFP_KERNEL); if (!mp->tx_skb) { printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name); - err = -ENOMEM; - goto out_free_rx_skb; + kfree(mp->rx_skb); + return -ENOMEM; } /* Allocate TX ring */ @@ -797,8 +845,9 @@ static int mv643xx_eth_open(struct net_device *dev) if (!mp->p_tx_desc_area) { printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", dev->name, size); - err = -ENOMEM; - goto out_free_tx_skb; + kfree(mp->rx_skb); + kfree(mp->tx_skb); + return -ENOMEM; } BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */ memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size); @@ -825,12 +874,13 @@ static int mv643xx_eth_open(struct net_device *dev) printk(KERN_ERR "%s: Freeing previously allocated TX queues...", dev->name); if (mp->rx_sram_size) - iounmap(mp->p_tx_desc_area); + iounmap(mp->p_rx_desc_area); else dma_free_coherent(NULL, mp->tx_desc_area_size, mp->p_tx_desc_area, mp->tx_desc_dma); - err = -ENOMEM; - goto out_free_tx_skb; + kfree(mp->rx_skb); + kfree(mp->tx_skb); + return -ENOMEM; } memset((void *)mp->p_rx_desc_area, 0, size); @@ -850,26 +900,9 @@ static int mv643xx_eth_open(struct net_device *dev) mp->tx_int_coal = eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); - /* Clear any pending ethernet port interrupts */ - mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); - mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); - - /* Unmask phy and link status changes interrupts */ - mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), - INT_UNMASK_ALL_EXT); + netif_start_queue(dev); - /* Unmask RX buffer and TX end interrupt */ - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); return 0; - -out_free_tx_skb: - kfree(mp->tx_skb); -out_free_rx_skb: - kfree(mp->rx_skb); -out_free_irq: - free_irq(dev->irq, dev); - - return err; } static void mv643xx_eth_free_tx_rings(struct net_device *dev) @@ -877,17 +910,14 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev) struct mv643xx_private *mp = netdev_priv(dev); unsigned int 
port_num = mp->port_num; unsigned int curr; - struct sk_buff *skb; /* Stop Tx Queues */ mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); /* Free outstanding skb's on TX rings */ for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { - skb = mp->tx_skb[curr]; - if (skb) { - mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags; - dev_kfree_skb(skb); + if (mp->tx_skb[curr]) { + dev_kfree_skb(mp->tx_skb[curr]); mp->tx_ring_skbs--; } } @@ -943,32 +973,44 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev) * Output : zero if success , nonzero if fails */ -static int mv643xx_eth_stop(struct net_device *dev) +/* Helper function for mv643xx_eth_stop */ + +static int mv643xx_eth_real_stop(struct net_device *dev) { struct mv643xx_private *mp = netdev_priv(dev); unsigned int port_num = mp->port_num; - /* Mask all interrupts on ethernet port */ - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); - /* wait for previous write to complete */ - mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); - -#ifdef MV643XX_NAPI - netif_poll_disable(dev); -#endif netif_carrier_off(dev); netif_stop_queue(dev); - eth_port_reset(mp->port_num); - mv643xx_eth_free_tx_rings(dev); mv643xx_eth_free_rx_rings(dev); -#ifdef MV643XX_NAPI - netif_poll_enable(dev); -#endif + eth_port_reset(mp->port_num); + + /* Disable ethernet port interrupts */ + mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); + mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); + + /* Mask RX buffer and TX end interrupt */ + mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); + + /* Mask phy and link status changes interrupts */ + mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0); + + return 0; +} + +static int mv643xx_eth_stop(struct net_device *dev) +{ + struct mv643xx_private *mp = netdev_priv(dev); + + spin_lock_irq(&mp->lock); + + mv643xx_eth_real_stop(dev); free_irq(dev->irq, dev); + spin_unlock_irq(&mp->lock); return 0; } @@ -980,17 +1022,20 @@ static void mv643xx_tx(struct net_device *dev) struct pkt_info pkt_info; while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { - if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) - dma_unmap_single(NULL, pkt_info.buf_ptr, - pkt_info.byte_cnt, - DMA_TO_DEVICE); - else - dma_unmap_page(NULL, pkt_info.buf_ptr, - pkt_info.byte_cnt, - DMA_TO_DEVICE); + if (pkt_info.return_info) { + if (skb_shinfo(pkt_info.return_info)->nr_frags) + dma_unmap_page(NULL, pkt_info.buf_ptr, + pkt_info.byte_cnt, + DMA_TO_DEVICE); + else + dma_unmap_single(NULL, pkt_info.buf_ptr, + pkt_info.byte_cnt, + DMA_TO_DEVICE); - if (pkt_info.return_info) dev_kfree_skb_irq(pkt_info.return_info); + } else + dma_unmap_page(NULL, pkt_info.buf_ptr, + pkt_info.byte_cnt, DMA_TO_DEVICE); } if (netif_queue_stopped(dev) && @@ -1008,11 +1053,14 @@ static int mv643xx_poll(struct net_device *dev, int *budget) struct mv643xx_private *mp = netdev_priv(dev); int done = 1, orig_budget, work_done; unsigned int port_num = mp->port_num; + unsigned long flags; #ifdef MV643XX_TX_FAST_REFILL if (++mp->tx_clean_threshold > 5) { + spin_lock_irqsave(&mp->lock, flags); mv643xx_tx(dev); mp->tx_clean_threshold = 0; + spin_unlock_irqrestore(&mp->lock, flags); } #endif @@ -1030,36 +1078,21 @@ static int mv643xx_poll(struct net_device *dev, int *budget) } if (done) { - netif_rx_complete(dev); + spin_lock_irqsave(&mp->lock, flags); + __netif_rx_complete(dev); mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), - INT_UNMASK_ALL); + INT_CAUSE_UNMASK_ALL); + mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL_EXT); + spin_unlock_irqrestore(&mp->lock, flags); } return done ? 0 : 1; } #endif -/* Hardware can't handle unaligned fragments smaller than 9 bytes. - * This helper function detects that case. - */ - -static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) -{ - unsigned int frag; - skb_frag_t *fragp; - - for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { - fragp = &skb_shinfo(skb)->frags[frag]; - if (fragp->size <= 8 && fragp->page_offset & 0x7) - return 1; - - } - return 0; -} - - /* * mv643xx_eth_start_xmit * @@ -1103,19 +1136,12 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) return 1; } -#ifdef MV643XX_CHECKSUM_OFFLOAD_TX - if (has_tiny_unaligned_frags(skb)) { - if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { - stats->tx_dropped++; - printk(KERN_DEBUG "%s: failed to linearize tiny " - "unaligned fragment\n", dev->name); - return 1; - } - } - spin_lock_irqsave(&mp->lock, flags); + /* Update packet info data structure -- DMA owned, first last */ +#ifdef MV643XX_CHECKSUM_OFFLOAD_TX if (!skb_shinfo(skb)->nr_frags) { +linear: if (skb->ip_summed != CHECKSUM_HW) { /* Errata BTS #50, IHL must be 5 if no HW checksum */ pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | @@ -1124,6 +1150,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) 5 << ETH_TX_IHL_SHIFT; pkt_info.l4i_chk = 0; } else { + pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC | @@ -1131,16 +1158,14 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) ETH_GEN_IP_V_4_CHECKSUM | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; /* CPU already calculated pseudo header checksum. */ - if ((skb->protocol == ETH_P_IP) && - (skb->nh.iph->protocol == IPPROTO_UDP) ) { + if (skb->nh.iph->protocol == IPPROTO_UDP) { pkt_info.cmd_sts |= ETH_UDP_FRAME; pkt_info.l4i_chk = skb->h.uh->check; - } else if ((skb->protocol == ETH_P_IP) && - (skb->nh.iph->protocol == IPPROTO_TCP)) + } else if (skb->nh.iph->protocol == IPPROTO_TCP) pkt_info.l4i_chk = skb->h.th->check; else { printk(KERN_ERR - "%s: chksum proto != IPv4 TCP or UDP\n", + "%s: chksum proto != TCP or UDP\n", dev->name); spin_unlock_irqrestore(&mp->lock, flags); return 1; @@ -1158,6 +1183,26 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) } else { unsigned int frag; + /* Since hardware can't handle unaligned fragments smaller + * than 9 bytes, if we find any, we linearize the skb + * and start again. When I've seen it, it's always been + * the first frag (probably near the end of the page), + * but we check all frags to be safe. + */ + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + skb_frag_t *fragp; + + fragp = &skb_shinfo(skb)->frags[frag]; + if (fragp->size <= 8 && fragp->page_offset & 0x7) { + skb_linearize(skb, GFP_ATOMIC); + printk(KERN_DEBUG "%s: unaligned tiny fragment" + "%d of %d, fixed\n", + dev->name, frag, + skb_shinfo(skb)->nr_frags); + goto linear; + } + } + /* first frag which is skb header */ pkt_info.byte_cnt = skb_headlen(skb); pkt_info.buf_ptr = dma_map_single(NULL, skb->data, @@ -1176,16 +1221,14 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) ETH_GEN_IP_V_4_CHECKSUM | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; /* CPU already calculated pseudo header checksum. 
*/ - if ((skb->protocol == ETH_P_IP) && - (skb->nh.iph->protocol == IPPROTO_UDP)) { + if (skb->nh.iph->protocol == IPPROTO_UDP) { pkt_info.cmd_sts |= ETH_UDP_FRAME; pkt_info.l4i_chk = skb->h.uh->check; - } else if ((skb->protocol == ETH_P_IP) && - (skb->nh.iph->protocol == IPPROTO_TCP)) + } else if (skb->nh.iph->protocol == IPPROTO_TCP) pkt_info.l4i_chk = skb->h.th->check; else { printk(KERN_ERR - "%s: chksum proto != IPv4 TCP or UDP\n", + "%s: chksum proto != TCP or UDP\n", dev->name); spin_unlock_irqrestore(&mp->lock, flags); return 1; @@ -1245,8 +1288,6 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #else - spin_lock_irqsave(&mp->lock, flags); - pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC; pkt_info.l4i_chk = 0; @@ -1299,18 +1340,39 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) } #ifdef CONFIG_NET_POLL_CONTROLLER -static void mv643xx_netpoll(struct net_device *netdev) +static inline void mv643xx_enable_irq(struct mv643xx_private *mp) +{ + int port_num = mp->port_num; + unsigned long flags; + + spin_lock_irqsave(&mp->lock, flags); + mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL); + mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), + INT_CAUSE_UNMASK_ALL_EXT); + spin_unlock_irqrestore(&mp->lock, flags); +} + +static inline void mv643xx_disable_irq(struct mv643xx_private *mp) { - struct mv643xx_private *mp = netdev_priv(netdev); int port_num = mp->port_num; + unsigned long flags; - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); - /* wait for previous write to complete */ - mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); + spin_lock_irqsave(&mp->lock, flags); + mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), + INT_CAUSE_MASK_ALL); + mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), + INT_CAUSE_MASK_ALL_EXT); + spin_unlock_irqrestore(&mp->lock, flags); +} - mv643xx_eth_int_handler(netdev->irq, netdev, NULL); +static void mv643xx_netpoll(struct net_device *netdev) +{ + struct mv643xx_private *mp = netdev_priv(netdev); - mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); + mv643xx_disable_irq(mp); + mv643xx_eth_int_handler(netdev->irq, netdev, NULL); + mv643xx_enable_irq(mp); } #endif @@ -1379,7 +1441,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) * Zero copy can only work if we use Discovery II memory. Else, we will * have to map the buffers to ISA memory which is only 16 MB */ - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM; #endif #endif @@ -1991,196 +2053,6 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, return 1; } -/* - * The entries in each table are indexed by a hash of a packet's MAC - * address. One bit in each entry determines whether the packet is - * accepted. There are 4 entries (each 8 bits wide) in each register - * of the table. 
The bits in each entry are defined as follows: - * 0 Accept=1, Drop=0 - * 3-1 Queue (ETH_Q0=0) - * 7-4 Reserved = 0; - */ -static void eth_port_set_filter_table_entry(int table, unsigned char entry) -{ - unsigned int table_reg; - unsigned int tbl_offset; - unsigned int reg_offset; - - tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */ - reg_offset = entry % 4; /* Entry offset within the register */ - - /* Set "accepts frame bit" at specified table entry */ - table_reg = mv_read(table + tbl_offset); - table_reg |= 0x01 << (8 * reg_offset); - mv_write(table + tbl_offset, table_reg); -} - -/* - * eth_port_mc_addr - Multicast address settings. - * - * The MV device supports multicast using two tables: - * 1) Special Multicast Table for MAC addresses of the form - * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0x_FF). - * The MAC DA[7:0] bits are used as a pointer to the Special Multicast - * Table entries in the DA-Filter table. - * 2) Other Multicast Table for multicast of another type. A CRC-8bit - * is used as an index to the Other Multicast Table entries in the - * DA-Filter table. This function calculates the CRC-8bit value. - * In either case, eth_port_set_filter_table_entry() is then called - * to set to set the actual table entry. - */ -static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) -{ - unsigned int mac_h; - unsigned int mac_l; - unsigned char crc_result = 0; - int table; - int mac_array[48]; - int crc[8]; - int i; - - if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && - (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { - table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE - (eth_port_num); - eth_port_set_filter_table_entry(table, p_addr[5]); - return; - } - - /* Calculate CRC-8 out of the given address */ - mac_h = (p_addr[0] << 8) | (p_addr[1]); - mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) | - (p_addr[4] << 8) | (p_addr[5] << 0); - - for (i = 0; i < 32; i++) - mac_array[i] = (mac_l >> i) & 0x1; - for (i = 32; i < 48; i++) - mac_array[i] = (mac_h >> (i - 32)) & 0x1; - - crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^ - mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^ - mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^ - mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^ - mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0]; - - crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^ - mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^ - mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^ - mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^ - mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^ - mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^ - mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0]; - - crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^ - mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^ - mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^ - mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^ - mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ - mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0]; - - crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^ - mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^ - mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^ - mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ 
mac_array[14] ^ - mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^ - mac_array[3] ^ mac_array[2] ^ mac_array[1]; - - crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^ - mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^ - mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^ - mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^ - mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^ - mac_array[3] ^ mac_array[2]; - - crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^ - mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^ - mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^ - mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^ - mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^ - mac_array[4] ^ mac_array[3]; - - crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^ - mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^ - mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^ - mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^ - mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^ - mac_array[4]; - - crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^ - mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^ - mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^ - mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^ - mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5]; - - for (i = 0; i < 8; i++) - crc_result = crc_result | (crc[i] << i); - - table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); - eth_port_set_filter_table_entry(table, crc_result); -} - -/* - * Set the entire multicast list based on dev->mc_list. - */ -static void eth_port_set_multicast_list(struct net_device *dev) -{ - - struct dev_mc_list *mc_list; - int i; - int table_index; - struct mv643xx_private *mp = netdev_priv(dev); - unsigned int eth_port_num = mp->port_num; - - /* If the device is in promiscuous mode or in all multicast mode, - * we will fully populate both multicast tables with accept. - * This is guaranteed to yield a match on all multicast addresses... - */ - if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) { - for (table_index = 0; table_index <= 0xFC; table_index += 4) { - /* Set all entries in DA filter special multicast - * table (Ex_dFSMT) - * Set for ETH_Q0 for now - * Bits - * 0 Accept=1, Drop=0 - * 3-1 Queue ETH_Q0=0 - * 7-4 Reserved = 0; - */ - mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); - - /* Set all entries in DA filter other multicast - * table (Ex_dFOMT) - * Set for ETH_Q0 for now - * Bits - * 0 Accept=1, Drop=0 - * 3-1 Queue ETH_Q0=0 - * 7-4 Reserved = 0; - */ - mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); - } - return; - } - - /* We will clear out multicast tables every time we get the list. - * Then add the entire new list... - */ - for (table_index = 0; table_index <= 0xFC; table_index += 4) { - /* Clear DA filter special multicast table (Ex_dFSMT) */ - mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE - (eth_port_num) + table_index, 0); - - /* Clear DA filter other multicast table (Ex_dFOMT) */ - mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE - (eth_port_num) + table_index, 0); - } - - /* Get pointer to net_device multicast list and add each one... 
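
The DA-filter helpers above pack four one-byte entries into every 32-bit register of the 256-entry filter tables; bit 0 of each byte is the accept bit and bits 3-1 select the receive queue (ETH_Q0 = 0), which is also why the promiscuous/allmulti path can mark four consecutive entries at once by writing 0x01010101. A minimal userspace sketch of the same index arithmetic (set_filter_entry and table_regs are illustrative names, not driver symbols):

	#include <stdio.h>

	static unsigned int table_regs[64];	/* 256 one-byte entries, four per 32-bit register */

	static void set_filter_entry(unsigned char entry)
	{
		unsigned int reg  = entry / 4;	/* register index (the driver keeps the byte offset (entry / 4) * 4) */
		unsigned int byte = entry % 4;	/* position of the entry inside the register */

		table_regs[reg] |= 0x01u << (8 * byte);	/* accept = 1, queue = ETH_Q0 */
	}

	int main(void)
	{
		/* a DA of 01:00:5e:00:00:42 indexes the special multicast table with DA[7:0] = 0x42 */
		set_filter_entry(0x42);
		printf("reg %u = 0x%08x\n", 0x42 / 4, table_regs[0x42 / 4]);
		return 0;
	}
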
*/ - for (i = 0, mc_list = dev->mc_list; - (i < 256) && (mc_list != NULL) && (i < dev->mc_count); - i++, mc_list = mc_list->next) - if (mc_list->dmi_addrlen == 6) - eth_port_mc_addr(eth_port_num, mc_list->dmi_addr); -} - /* * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables * @@ -2208,11 +2080,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num) for (table_index = 0; table_index <= 0xFC; table_index += 4) { /* Clear DA filter special multicast table (Ex_dFSMT) */ - mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE - (eth_port_num) + table_index, 0); + mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE + (eth_port_num) + table_index), 0); /* Clear DA filter other multicast table (Ex_dFOMT) */ - mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE - (eth_port_num) + table_index, 0); + mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE + (eth_port_num) + table_index), 0); } } @@ -2617,7 +2489,6 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, struct eth_tx_desc *current_descriptor; struct eth_tx_desc *first_descriptor; u32 command; - unsigned long flags; /* Do not process Tx ring in case of Tx ring resource error */ if (mp->tx_resource_err) @@ -2634,8 +2505,6 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, return ETH_ERROR; } - spin_lock_irqsave(&mp->lock, flags); - mp->tx_ring_skbs++; BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); @@ -2685,15 +2554,11 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, mp->tx_resource_err = 1; mp->tx_curr_desc_q = tx_first_desc; - spin_unlock_irqrestore(&mp->lock, flags); - return ETH_QUEUE_LAST_RESOURCE; } mp->tx_curr_desc_q = tx_next_desc; - spin_unlock_irqrestore(&mp->lock, flags); - return ETH_OK; } #else @@ -2704,14 +2569,11 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, int tx_desc_used; struct eth_tx_desc *current_descriptor; unsigned int command_status; - unsigned long flags; /* Do not process Tx ring in case of Tx ring resource error */ if (mp->tx_resource_err) return ETH_QUEUE_FULL; - spin_lock_irqsave(&mp->lock, flags); - mp->tx_ring_skbs++; BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); @@ -2742,12 +2604,9 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, /* Check for ring index overlap in the Tx desc ring */ if (tx_desc_curr == tx_desc_used) { mp->tx_resource_err = 1; - - spin_unlock_irqrestore(&mp->lock, flags); return ETH_QUEUE_LAST_RESOURCE; } - spin_unlock_irqrestore(&mp->lock, flags); return ETH_OK; } #endif @@ -2770,27 +2629,23 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, * Tx ring 'first' and 'used' indexes are updated. * * RETURN: - * ETH_OK on success - * ETH_ERROR otherwise. + * ETH_ERROR in case the routine can not access Tx desc ring. + * ETH_RETRY in case there is transmission in process. + * ETH_END_OF_JOB if the routine has nothing to release. + * ETH_OK otherwise. 
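
Given the reworked return codes listed above, a caller can keep harvesting finished Tx descriptors until the routine reports that nothing more can be done right now. A self-contained sketch of that loop (fake_tx_return_desc is a stub standing in for eth_tx_return_desc; only the control flow is meant to mirror the driver):

	#include <stdio.h>

	enum ret { ETH_OK, ETH_RETRY, ETH_END_OF_JOB, ETH_ERROR };	/* illustrative values */

	/* pretend three descriptors are ready to be released */
	static enum ret fake_tx_return_desc(int *pending)
	{
		return (*pending)-- > 0 ? ETH_OK : ETH_END_OF_JOB;
	}

	int main(void)
	{
		int pending = 3, released = 0;
		enum ret r;

		/* ETH_OK means one descriptor was released; any other code ends the pass */
		while ((r = fake_tx_return_desc(&pending)) == ETH_OK)
			released++;

		printf("released %d descriptors, stopped with code %d\n", released, (int)r);
		return 0;
	}

In the driver the same distinction lets ETH_RETRY (DMA still owns the descriptor) and ETH_END_OF_JOB (ring drained) be handled differently from a genuine ETH_ERROR.
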
* */ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, struct pkt_info *p_pkt_info) { int tx_desc_used; - int tx_busy_desc; - struct eth_tx_desc *p_tx_desc_used; - unsigned int command_status; - unsigned long flags; - int err = ETH_OK; - - spin_lock_irqsave(&mp->lock, flags); - #ifdef MV643XX_CHECKSUM_OFFLOAD_TX - tx_busy_desc = mp->tx_first_desc_q; + int tx_busy_desc = mp->tx_first_desc_q; #else - tx_busy_desc = mp->tx_curr_desc_q; + int tx_busy_desc = mp->tx_curr_desc_q; #endif + struct eth_tx_desc *p_tx_desc_used; + unsigned int command_status; /* Get the Tx Desc ring indexes */ tx_desc_used = mp->tx_used_desc_q; @@ -2798,30 +2653,22 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; /* Sanity check */ - if (p_tx_desc_used == NULL) { - err = ETH_ERROR; - goto out; - } + if (p_tx_desc_used == NULL) + return ETH_ERROR; /* Stop release. About to overlap the current available Tx descriptor */ - if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) { - err = ETH_ERROR; - goto out; - } + if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) + return ETH_END_OF_JOB; command_status = p_tx_desc_used->cmd_sts; /* Still transmitting... */ - if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { - err = ETH_ERROR; - goto out; - } + if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) + return ETH_RETRY; /* Pass the packet information to the caller */ p_pkt_info->cmd_sts = command_status; p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; - p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr; - p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt; mp->tx_skb[tx_desc_used] = NULL; /* Update the next descriptor to release. */ @@ -2833,10 +2680,7 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, BUG_ON(mp->tx_ring_skbs == 0); mp->tx_ring_skbs--; -out: - spin_unlock_irqrestore(&mp->lock, flags); - - return err; + return ETH_OK; } /* @@ -2868,14 +2712,11 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, int rx_next_curr_desc, rx_curr_desc, rx_used_desc; volatile struct eth_rx_desc *p_rx_desc; unsigned int command_status; - unsigned long flags; /* Do not process Rx ring in case of Rx ring resource error */ if (mp->rx_resource_err) return ETH_QUEUE_FULL; - spin_lock_irqsave(&mp->lock, flags); - /* Get the Rx Desc ring 'curr and 'used' indexes */ rx_curr_desc = mp->rx_curr_desc_q; rx_used_desc = mp->rx_used_desc_q; @@ -2887,10 +2728,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, rmb(); /* Nothing to receive... 
*/ - if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { - spin_unlock_irqrestore(&mp->lock, flags); + if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) return ETH_END_OF_JOB; - } p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; p_pkt_info->cmd_sts = command_status; @@ -2910,8 +2749,6 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, if (rx_next_curr_desc == rx_used_desc) mp->rx_resource_err = 1; - spin_unlock_irqrestore(&mp->lock, flags); - return ETH_OK; } @@ -2940,9 +2777,6 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, { int used_rx_desc; /* Where to return Rx resource */ volatile struct eth_rx_desc *p_used_rx_desc; - unsigned long flags; - - spin_lock_irqsave(&mp->lock, flags); /* Get 'used' Rx descriptor */ used_rx_desc = mp->rx_used_desc_q; @@ -2966,8 +2800,6 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, /* Any Rx return cancels the Rx resource error status */ mp->rx_resource_err = 0; - spin_unlock_irqrestore(&mp->lock, flags); - return ETH_OK; } diff --git a/trunk/drivers/net/skge.c b/trunk/drivers/net/skge.c index bf55a4cfb3d2..b538e3038058 100644 --- a/trunk/drivers/net/skge.c +++ b/trunk/drivers/net/skge.c @@ -3243,22 +3243,12 @@ static int __devinit skge_probe(struct pci_dev *pdev, pci_set_master(pdev); - if (sizeof(dma_addr_t) > sizeof(u32) && - !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { + if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); - if (err < 0) { - printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " - "for consistent allocations\n", pci_name(pdev)); - goto err_out_free_regions; - } - } else { - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); - if (err) { - printk(KERN_ERR PFX "%s no usable DMA configuration\n", - pci_name(pdev)); - goto err_out_free_regions; - } + else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { + printk(KERN_ERR PFX "%s no usable DMA configuration\n", + pci_name(pdev)); + goto err_out_free_regions; } #ifdef __BIG_ENDIAN diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c index f8b973a04b65..f5d697c0c031 100644 --- a/trunk/drivers/net/sky2.c +++ b/trunk/drivers/net/sky2.c @@ -57,7 +57,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "0.13" +#define DRV_VERSION "0.11" #define PFX DRV_NAME " " /* @@ -75,7 +75,6 @@ #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) #define RX_DEF_PENDING RX_MAX_PENDING -#define RX_SKB_ALIGN 8 #define TX_RING_SIZE 512 #define TX_DEF_PENDING (TX_RING_SIZE - 1) @@ -92,7 +91,7 @@ static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR - | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; + | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR; static int debug = -1; /* defaults above */ module_param(debug, int, 0); @@ -625,16 +624,13 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) } -/* Assign Ram Buffer allocation. 
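
The sky2_ramset()/sky2_up() rework in the hunks that follow switches the RAM buffer bookkeeping from 4 KB blocks to plain bytes, hands two thirds of the packet RAM to the receiver, and derives the flow-control pause thresholds from the receive share. A worked example of that arithmetic (the 64 KB packet RAM is an assumed value, as if B2_E_0 read back 16):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ramsize = 64 * 1024;		/* assumed: B2_E_0 == 16 -> 16 * 4096 bytes */
		unsigned int rxspace = (2 * ramsize) / 3;	/* two thirds of the RAM for receive */

		/* the RB_* registers count 64-bit words, so byte values are divided by 8 */
		unsigned int rb_start = 0;			/* rx queue starts at byte 0 */
		unsigned int rb_len   = rxspace / 8;
		unsigned int rb_end   = rb_start + rb_len - 1;
		unsigned int rxlo     = rb_len / 2;		/* lower pause threshold */
		unsigned int rxup     = rxlo + rb_len / 4;	/* upper pause threshold */

		printf("rx: start=%u end=%u lo=%u up=%u; tx keeps %u bytes\n",
		       rb_start, rb_end, rxlo, rxup, ramsize - rxspace);
		return 0;
	}
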
- * start and end are in units of 4k bytes - * ram registers are in units of 64bit words - */ -static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk) +static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len) { - u32 start, end; + u32 end; - start = startk * 4096/8; - end = (endk * 4096/8) - 1; + start /= 8; + len /= 8; + end = start + len - 1; sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); sky2_write32(hw, RB_ADDR(q, RB_START), start); @@ -643,19 +639,14 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk) sky2_write32(hw, RB_ADDR(q, RB_RP), start); if (q == Q_R1 || q == Q_R2) { - u32 space = (endk - startk) * 4096/8; - u32 tp = space - space/4; + u32 rxup, rxlo; - /* On receive queue's set the thresholds - * give receiver priority when > 3/4 full - * send pause when down to 2K - */ - sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); - sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); + rxlo = len/2; + rxup = rxlo + len/4; - tp = space - 2048/8; - sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); - sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); + /* Set thresholds on receive queue's */ + sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup); + sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo); } else { /* Enable store & forward on Tx queue's because * Tx FIFO is only 1K on Yukon @@ -704,10 +695,9 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) * This is a workaround code taken from SysKonnect sk98lin driver * to deal with chip bug on Yukon EC rev 0 in the wraparound case. */ -static void sky2_put_idx(struct sky2_hw *hw, unsigned q, +static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx, u16 *last, u16 size) { - wmb(); if (is_ec_a1(hw) && idx < *last) { u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); @@ -731,7 +721,6 @@ static void sky2_put_idx(struct sky2_hw *hw, unsigned q, sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); } *last = idx; - mmiowb(); } @@ -745,11 +734,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) /* Return high part of DMA address (could be 32 or 64 bit) */ static inline u32 high32(dma_addr_t a) { - return sizeof(a) > sizeof(u32) ? 
(a >> 16) >> 16 : 0; + return (a >> 16) >> 16; } /* Build description to hardware about buffer */ -static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map) +static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map) { struct sky2_rx_le *le; u32 hi = high32(map); @@ -889,13 +878,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp struct sky2_hw *hw = sky2->hw; u16 port = sky2->port; - spin_lock_bh(&sky2->tx_lock); + spin_lock(&sky2->tx_lock); sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); sky2->vlgrp = grp; - spin_unlock_bh(&sky2->tx_lock); + spin_unlock(&sky2->tx_lock); } static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) @@ -904,42 +893,27 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) struct sky2_hw *hw = sky2->hw; u16 port = sky2->port; - spin_lock_bh(&sky2->tx_lock); + spin_lock(&sky2->tx_lock); sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); if (sky2->vlgrp) sky2->vlgrp->vlan_devices[vid] = NULL; - spin_unlock_bh(&sky2->tx_lock); + spin_unlock(&sky2->tx_lock); } #endif -/* - * It appears the hardware has a bug in the FIFO logic that - * cause it to hang if the FIFO gets overrun and the receive buffer - * is not aligned. ALso alloc_skb() won't align properly if slab - * debugging is enabled. - */ -static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask) -{ - struct sk_buff *skb; - - skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask); - if (likely(skb)) { - unsigned long p = (unsigned long) skb->data; - skb_reserve(skb, - ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p); - } - - return skb; -} - /* * Allocate and setup receiver buffer pool. * In case of 64 bit dma, there are 2X as many list elements * available as ring entries * and need to reserve one list element so we don't wrap around. + * + * It appears the hardware has a bug in the FIFO logic that + * cause it to hang if the FIFO gets overrun and the receive buffer + * is not aligned. This means we can't use skb_reserve to align + * the IP header. */ static int sky2_rx_start(struct sky2_port *sky2) { @@ -955,7 +929,7 @@ static int sky2_rx_start(struct sky2_port *sky2) for (i = 0; i < sky2->rx_pending; i++) { struct ring_info *re = sky2->rx_ring + i; - re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL); + re->skb = dev_alloc_skb(sky2->rx_bufsize); if (!re->skb) goto nomem; @@ -1012,19 +986,19 @@ static int sky2_up(struct net_device *dev) sky2_mac_init(hw, port); - /* Determine available ram buffer space (in 4K blocks). - * Note: not sure about the FE setting below yet - */ - if (hw->chip_id == CHIP_ID_YUKON_FE) - ramsize = 4; - else - ramsize = sky2_read8(hw, B2_E_0); - - /* Give transmitter one third (rounded up) */ - rxspace = ramsize - (ramsize + 2) / 3; + /* Configure RAM buffers */ + if (hw->chip_id == CHIP_ID_YUKON_FE || + (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2)) + ramsize = 4096; + else { + u8 e0 = sky2_read8(hw, B2_E_0); + ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096); + } + /* 2/3 for Rx */ + rxspace = (2 * ramsize) / 3; sky2_ramset(hw, rxqaddr[port], 0, rxspace); - sky2_ramset(hw, txqaddr[port], rxspace, ramsize); + sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); /* Make sure SyncQ is disabled */ sky2_write8(hw, RB_ADDR(port == 0 ? 
Q_XS1 : Q_XS2, RB_CTRL), @@ -1080,7 +1054,7 @@ static inline int tx_avail(const struct sky2_port *sky2) } /* Estimate of number of transmit list elements required */ -static unsigned tx_le_req(const struct sk_buff *skb) +static inline unsigned tx_le_req(const struct sk_buff *skb) { unsigned count; @@ -1116,10 +1090,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) u16 mss; u8 ctrl; - /* No BH disabling for tx_lock here. We are running in BH disabled - * context and TX reclaim runs via poll inside of a software - * interrupt, and no related locks in IRQ processing. - */ if (!spin_trylock(&sky2->tx_lock)) return NETDEV_TX_LOCKED; @@ -1129,9 +1099,8 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) */ if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); - if (net_ratelimit()) - printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", - dev->name); + printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", + dev->name); } spin_unlock(&sky2->tx_lock); @@ -1230,7 +1199,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); - addr64 = high32(mapping); + addr64 = (mapping >> 16) >> 16; if (addr64 != sky2->tx_addr64) { le = get_tx_le(sky2); le->tx.addr = cpu_to_le32(addr64); @@ -1260,6 +1229,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); out_unlock: + mmiowb(); spin_unlock(&sky2->tx_lock); dev->trans_start = jiffies; @@ -1312,17 +1282,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) dev_kfree_skb_any(skb); } + spin_lock(&sky2->tx_lock); sky2->tx_cons = put; if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) netif_wake_queue(dev); + spin_unlock(&sky2->tx_lock); } /* Cleanup all untransmitted buffers, assume transmitter not running */ static void sky2_tx_clean(struct sky2_port *sky2) { - spin_lock_bh(&sky2->tx_lock); sky2_tx_complete(sky2, sky2->tx_prod); - spin_unlock_bh(&sky2->tx_lock); } /* Network shutdown */ @@ -1612,40 +1582,28 @@ static void sky2_phy_task(void *arg) local_irq_enable(); } - -/* Transmit timeout is only called if we are running, carries is up - * and tx queue is full (stopped). - */ static void sky2_tx_timeout(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned txq = txqaddr[sky2->port]; - u16 ridx; - - /* Maybe we just missed an status interrupt */ - spin_lock(&sky2->tx_lock); - ridx = sky2_read16(hw, - sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX); - sky2_tx_complete(sky2, ridx); - spin_unlock(&sky2->tx_lock); - - if (!netif_queue_stopped(dev)) { - if (net_ratelimit()) - pr_info(PFX "transmit interrupt missed? 
recovered\n"); - return; - } if (netif_msg_timer(sky2)) printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); + netif_stop_queue(dev); + sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); + sky2_read32(hw, Q_ADDR(txq, Q_CSR)); + sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); sky2_tx_clean(sky2); sky2_qset(hw, txq); sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); + + netif_wake_queue(dev); } @@ -1755,7 +1713,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2, } else { struct sk_buff *nskb; - nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC); + nskb = dev_alloc_skb(sky2->rx_bufsize); if (!nskb) goto resubmit; @@ -1787,7 +1745,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2, error: ++sky2->net_stats.rx_errors; - if (netif_msg_rx_err(sky2) && net_ratelimit()) + if (netif_msg_rx_err(sky2)) printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", sky2->netdev->name, status, length); @@ -1808,16 +1766,13 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2, */ #define TX_NO_STATUS 0xffff -static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) +static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) { if (last != TX_NO_STATUS) { struct net_device *dev = hw->dev[port]; if (dev && netif_running(dev)) { struct sky2_port *sky2 = netdev_priv(dev); - - spin_lock(&sky2->tx_lock); sky2_tx_complete(sky2, last); - spin_unlock(&sky2->tx_lock); } } } @@ -1845,6 +1800,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) struct sk_buff *skb; u32 status; u16 length; + u8 op; le = hw->st_le + hw->st_idx; hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; @@ -1858,8 +1814,10 @@ static int sky2_poll(struct net_device *dev0, int *budget) sky2 = netdev_priv(dev); status = le32_to_cpu(le->status); length = le16_to_cpu(le->length); + op = le->opcode & ~HW_OWNER; + le->opcode = 0; - switch (le->opcode & ~HW_OWNER) { + switch (op) { case OP_RXSTAT: skb = sky2_receive(sky2, length, status); if (!skb) @@ -1907,13 +1865,14 @@ static int sky2_poll(struct net_device *dev0, int *budget) default: if (net_ratelimit()) printk(KERN_WARNING PFX - "unknown status opcode 0x%x\n", le->opcode); + "unknown status opcode 0x%x\n", op); break; } } exit_loop: sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); + mmiowb(); sky2_tx_check(hw, 0, tx_done[0]); sky2_tx_check(hw, 1, tx_done[1]); @@ -1928,6 +1887,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) netif_rx_complete(dev0); hw->intr_mask |= Y2_IS_STAT_BMU; sky2_write32(hw, B0_IMSK, hw->intr_mask); + mmiowb(); return 0; } else { *budget -= work_done; @@ -1940,42 +1900,35 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) { struct net_device *dev = hw->dev[port]; - if (net_ratelimit()) - printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n", - dev->name, status); + printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n", + dev->name, status); if (status & Y2_IS_PAR_RD1) { - if (net_ratelimit()) - printk(KERN_ERR PFX "%s: ram data read parity error\n", - dev->name); + printk(KERN_ERR PFX "%s: ram data read parity error\n", + dev->name); /* Clear IRQ */ sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); } if (status & Y2_IS_PAR_WR1) { - if (net_ratelimit()) - printk(KERN_ERR PFX "%s: ram data write parity error\n", - dev->name); + printk(KERN_ERR PFX "%s: ram data write parity error\n", + dev->name); sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); } if (status & Y2_IS_PAR_MAC1) { - if (net_ratelimit()) - 
printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name); + printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); } if (status & Y2_IS_PAR_RX1) { - if (net_ratelimit()) - printk(KERN_ERR PFX "%s: RX parity error\n", dev->name); + printk(KERN_ERR PFX "%s: RX parity error\n", dev->name); sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); } if (status & Y2_IS_TCP_TXA1) { - if (net_ratelimit()) - printk(KERN_ERR PFX "%s: TCP segmentation error\n", - dev->name); + printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name); sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); } } @@ -1991,9 +1944,8 @@ static void sky2_hw_intr(struct sky2_hw *hw) u16 pci_err; pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); - if (net_ratelimit()) - printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", - pci_name(hw->pdev), pci_err); + printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", + pci_name(hw->pdev), pci_err); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_write_config_word(hw->pdev, PCI_STATUS, @@ -2007,9 +1959,8 @@ static void sky2_hw_intr(struct sky2_hw *hw) pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); - if (net_ratelimit()) - printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", - pci_name(hw->pdev), pex_err); + printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", + pci_name(hw->pdev), pex_err); /* clear the interrupt */ sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); @@ -2299,7 +2250,7 @@ static int sky2_reset(struct sky2_hw *hw) return 0; } -static u32 sky2_supported_modes(const struct sky2_hw *hw) +static inline u32 sky2_supported_modes(const struct sky2_hw *hw) { u32 modes; if (hw->copper) { @@ -3044,7 +2995,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, return dev; } -static void __devinit sky2_show_addr(struct net_device *dev) +static inline void sky2_show_addr(struct net_device *dev) { const struct sky2_port *sky2 = netdev_priv(dev); @@ -3087,17 +3038,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev, goto err_out_free_regions; } - if (sizeof(dma_addr_t) > sizeof(u32) && - !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { - using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); - if (err < 0) { - printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " - "for consistent allocations\n", pci_name(pdev)); - goto err_out_free_regions; - } + if (sizeof(dma_addr_t) > sizeof(u32)) { + err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) + using_dac = 1; + } - } else { + if (!using_dac) { err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (err) { printk(KERN_ERR PFX "%s no usable DMA configuration\n", @@ -3105,7 +3052,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev, goto err_out_free_regions; } } - #ifdef __BIG_ENDIAN /* byte swap descriptors in hardware */ { @@ -3118,13 +3064,14 @@ static int __devinit sky2_probe(struct pci_dev *pdev, #endif err = -ENOMEM; - hw = kzalloc(sizeof(*hw), GFP_KERNEL); + hw = kmalloc(sizeof(*hw), GFP_KERNEL); if (!hw) { printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", pci_name(pdev)); goto err_out_free_regions; } + memset(hw, 0, sizeof(*hw)); hw->pdev = pdev; hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); diff --git a/trunk/drivers/net/spider_net.c b/trunk/drivers/net/spider_net.c index 1f5975a61e1f..0d765f1733b5 100644 --- a/trunk/drivers/net/spider_net.c +++ b/trunk/drivers/net/spider_net.c @@ -22,6 +22,7 @@ */ #include + #include #include #include @@ -29,7 
+30,6 @@ #include #include #include -#include #include #include #include @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -109,6 +108,42 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) writel(value, card->regs + reg); } +/** + * spider_net_write_reg_sync - writes to an SMMIO register of a card + * @card: device structure + * @reg: register to write to + * @value: value to write into the specified SMMIO register + * + * Unlike spider_net_write_reg, this will also make sure the + * data arrives on the card by reading the reg again. + */ +static void +spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value) +{ + value = cpu_to_le32(value); + writel(value, card->regs + reg); + (void)readl(card->regs + reg); +} + +/** + * spider_net_rx_irq_off - switch off rx irq on this spider card + * @card: device structure + * + * switches off rx irq by masking them out in the GHIINTnMSK register + */ +static void +spider_net_rx_irq_off(struct spider_net_card *card) +{ + u32 regvalue; + unsigned long flags; + + spin_lock_irqsave(&card->intmask_lock, flags); + regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); + regvalue &= ~SPIDER_NET_RXINT; + spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue); + spin_unlock_irqrestore(&card->intmask_lock, flags); +} + /** spider_net_write_phy - write to phy register * @netdev: adapter to be written to * @mii_id: id of MII @@ -164,33 +199,60 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg) } /** - * spider_net_rx_irq_off - switch off rx irq on this spider card + * spider_net_rx_irq_on - switch on rx irq on this spider card * @card: device structure * - * switches off rx irq by masking them out in the GHIINTnMSK register + * switches on rx irq by enabling them in the GHIINTnMSK register */ static void -spider_net_rx_irq_off(struct spider_net_card *card) +spider_net_rx_irq_on(struct spider_net_card *card) { u32 regvalue; + unsigned long flags; - regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT); - spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); + spin_lock_irqsave(&card->intmask_lock, flags); + regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); + regvalue |= SPIDER_NET_RXINT; + spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue); + spin_unlock_irqrestore(&card->intmask_lock, flags); } /** - * spider_net_rx_irq_on - switch on rx irq on this spider card + * spider_net_tx_irq_off - switch off tx irq on this spider card * @card: device structure * - * switches on rx irq by enabling them in the GHIINTnMSK register + * switches off tx irq by masking them out in the GHIINTnMSK register */ static void -spider_net_rx_irq_on(struct spider_net_card *card) +spider_net_tx_irq_off(struct spider_net_card *card) { u32 regvalue; + unsigned long flags; - regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT; - spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); + spin_lock_irqsave(&card->intmask_lock, flags); + regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); + regvalue &= ~SPIDER_NET_TXINT; + spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue); + spin_unlock_irqrestore(&card->intmask_lock, flags); +} + +/** + * spider_net_tx_irq_on - switch on tx irq on this spider card + * @card: device structure + * + * switches on tx irq by enabling them in the GHIINTnMSK register + */ +static void +spider_net_tx_irq_on(struct spider_net_card *card) +{ + u32 regvalue; + unsigned long flags; + + 
spin_lock_irqsave(&card->intmask_lock, flags); + regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); + regvalue |= SPIDER_NET_TXINT; + spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue); + spin_unlock_irqrestore(&card->intmask_lock, flags); } /** @@ -264,8 +326,9 @@ static enum spider_net_descr_status spider_net_get_descr_status(struct spider_net_descr *descr) { u32 cmd_status; - + rmb(); cmd_status = descr->dmac_cmd_status; + rmb(); cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; /* no need to mask out any bits, as cmd_status is 32 bits wide only * (and unsigned) */ @@ -286,6 +349,7 @@ spider_net_set_descr_status(struct spider_net_descr *descr, { u32 cmd_status; /* read the status */ + mb(); cmd_status = descr->dmac_cmd_status; /* clean the upper 4 bits */ cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; @@ -293,6 +357,7 @@ spider_net_set_descr_status(struct spider_net_descr *descr, cmd_status |= ((u32)status)<dmac_cmd_status = cmd_status; + wmb(); } /** @@ -333,9 +398,8 @@ spider_net_init_chain(struct spider_net_card *card, { int i; struct spider_net_descr *descr; - dma_addr_t buf; - atomic_set(&card->rx_chain_refill,0); + spin_lock_init(&card->chain_lock); descr = start_descr; memset(descr, 0, sizeof(*descr) * no); @@ -344,14 +408,14 @@ spider_net_init_chain(struct spider_net_card *card, for (i=0; ipdev, descr, - SPIDER_NET_DESCR_SIZE, - PCI_DMA_BIDIRECTIONAL); + descr->bus_addr = + pci_map_single(card->pdev, descr, + SPIDER_NET_DESCR_SIZE, + PCI_DMA_BIDIRECTIONAL); - if (buf == DMA_ERROR_CODE) + if (descr->bus_addr == DMA_ERROR_CODE) goto iommu_error; - descr->bus_addr = buf; descr->next = descr + 1; descr->prev = descr - 1; @@ -375,8 +439,7 @@ spider_net_init_chain(struct spider_net_card *card, for (i=0; i < no; i++, descr++) if (descr->bus_addr) pci_unmap_single(card->pdev, descr->bus_addr, - SPIDER_NET_DESCR_SIZE, - PCI_DMA_BIDIRECTIONAL); + SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL); return -ENOMEM; } @@ -396,7 +459,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) if (descr->skb) { dev_kfree_skb(descr->skb); pci_unmap_single(card->pdev, descr->buf_addr, - SPIDER_NET_MAX_FRAME, + SPIDER_NET_MAX_MTU, PCI_DMA_BIDIRECTIONAL); } descr = descr->next; @@ -417,13 +480,12 @@ static int spider_net_prepare_rx_descr(struct spider_net_card *card, struct spider_net_descr *descr) { - dma_addr_t buf; int error = 0; int offset; int bufsize; /* we need to round up the buffer size to a multiple of 128 */ - bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) & + bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) & (~(SPIDER_NET_RXBUF_ALIGN - 1)); /* and we need to have it 128 byte aligned, therefore we allocate a @@ -431,8 +493,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, /* allocate an skb */ descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1); if (!descr->skb) { - if (netif_msg_rx_err(card) && net_ratelimit()) - pr_err("Not enough memory to allocate rx buffer\n"); + if (net_ratelimit()) + if (netif_msg_rx_err(card)) + pr_err("Not enough memory to allocate " + "rx buffer\n"); return -ENOMEM; } descr->buf_size = bufsize; @@ -446,12 +510,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, if (offset) skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); /* io-mmu-map the skb */ - buf = pci_map_single(card->pdev, descr->skb->data, - SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); - descr->buf_addr = buf; - if (buf == DMA_ERROR_CODE) { + descr->buf_addr = pci_map_single(card->pdev, 
descr->skb->data, + SPIDER_NET_MAX_MTU, + PCI_DMA_BIDIRECTIONAL); + if (descr->buf_addr == DMA_ERROR_CODE) { dev_kfree_skb_any(descr->skb); - if (netif_msg_rx_err(card) && net_ratelimit()) + if (netif_msg_rx_err(card)) pr_err("Could not iommu-map rx buffer\n"); spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); } else { @@ -462,10 +526,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, } /** - * spider_net_enable_rxchtails - sets RX dmac chain tail addresses + * spider_net_enable_rxctails - sets RX dmac chain tail addresses * @card: card structure * - * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the + * spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the * chip by writing to the appropriate register. DMA is enabled in * spider_net_enable_rxdmac. */ @@ -487,7 +551,6 @@ spider_net_enable_rxchtails(struct spider_net_card *card) static void spider_net_enable_rxdmac(struct spider_net_card *card) { - wmb(); spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, SPIDER_NET_DMA_RX_VALUE); } @@ -496,28 +559,32 @@ spider_net_enable_rxdmac(struct spider_net_card *card) * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains * @card: card structure * - * refills descriptors in the rx chain: allocates skbs and iommu-maps them. + * refills descriptors in all chains (last used chain first): allocates skbs + * and iommu-maps them. */ static void spider_net_refill_rx_chain(struct spider_net_card *card) { struct spider_net_descr_chain *chain; + int count = 0; + unsigned long flags; chain = &card->rx_chain; - /* one context doing the refill (and a second context seeing that - * and omitting it) is ok. If called by NAPI, we'll be called again - * as spider_net_decode_one_descr is called several times. If some - * interrupt calls us, the NAPI is about to clean up anyway. */ - if (atomic_inc_return(&card->rx_chain_refill) == 1) - while (spider_net_get_descr_status(chain->head) == - SPIDER_NET_DESCR_NOT_IN_USE) { - if (spider_net_prepare_rx_descr(card, chain->head)) - break; - chain->head = chain->head->next; - } + spin_lock_irqsave(&card->chain_lock, flags); + while (spider_net_get_descr_status(chain->head) == + SPIDER_NET_DESCR_NOT_IN_USE) { + if (spider_net_prepare_rx_descr(card, chain->head)) + break; + count++; + chain->head = chain->head->next; + } + spin_unlock_irqrestore(&card->chain_lock, flags); - atomic_dec(&card->rx_chain_refill); + /* could be optimized, only do that, if we know the DMA processing + * has terminated */ + if (count) + spider_net_enable_rxdmac(card); } /** @@ -546,7 +613,6 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card) /* this will allocate the rest of the rx buffers; if not, it's * business as usual later on */ spider_net_refill_rx_chain(card); - spider_net_enable_rxdmac(card); return 0; error: @@ -583,30 +649,24 @@ spider_net_release_tx_descr(struct spider_net_card *card, * @card: adapter structure * @brutal: if set, don't care about whether descriptor seems to be in use * - * returns 0 if the tx ring is empty, otherwise 1. - * - * spider_net_release_tx_chain releases the tx descriptors that spider has - * finished with (if non-brutal) or simply release tx descriptors (if brutal). - * If some other context is calling this function, we return 1 so that we're - * scheduled again (if we were scheduled) and will not loose initiative. 
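
spider_net_prepare_rx_descr() above first rounds the buffer size up to a multiple of SPIDER_NET_RXBUF_ALIGN (128 bytes) and then uses skb_reserve() to push skb->data onto the next 128-byte boundary before the iommu mapping. The same round-up and alignment arithmetic as a standalone sketch (the frame size and the fake data address are assumed values):

	#include <stdio.h>

	#define RXBUF_ALIGN 128

	int main(void)
	{
		unsigned int  max_frame = 2308;			/* assumed maximum frame size */
		unsigned int  bufsize   = (max_frame + RXBUF_ALIGN - 1) & ~(RXBUF_ALIGN - 1);
		unsigned long data      = 0x1040;		/* pretend skb->data address */
		unsigned int  offset    = data & (RXBUF_ALIGN - 1);

		if (offset)
			data += RXBUF_ALIGN - offset;		/* what the skb_reserve() call achieves */

		printf("bufsize=%u, data aligned to 0x%lx\n", bufsize, data);
		return 0;
	}
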
+ * releases the tx descriptors that spider has finished with (if non-brutal) + * or simply release tx descriptors (if brutal) */ -static int +static void spider_net_release_tx_chain(struct spider_net_card *card, int brutal) { struct spider_net_descr_chain *tx_chain = &card->tx_chain; enum spider_net_descr_status status; - if (atomic_inc_return(&card->tx_chain_release) != 1) { - atomic_dec(&card->tx_chain_release); - return 1; - } + spider_net_tx_irq_off(card); + /* no lock for chain needed, if this is only executed once at a time */ +again: for (;;) { status = spider_net_get_descr_status(tx_chain->tail); switch (status) { case SPIDER_NET_DESCR_CARDOWNED: - if (!brutal) - goto out; + if (!brutal) goto out; /* fallthrough, if we release the descriptors * brutally (then we don't care about * SPIDER_NET_DESCR_CARDOWNED) */ @@ -633,30 +693,25 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) tx_chain->tail = tx_chain->tail->next; } out: - atomic_dec(&card->tx_chain_release); - netif_wake_queue(card->netdev); - if (status == SPIDER_NET_DESCR_CARDOWNED) - return 1; - return 0; -} - -/** - * spider_net_cleanup_tx_ring - cleans up the TX ring - * @card: card structure - * - * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use - * interrupts to cleanup our TX ring) and returns sent packets to the stack - * by freeing them - */ -static void -spider_net_cleanup_tx_ring(struct spider_net_card *card) -{ - if ( (spider_net_release_tx_chain(card, 0)) && - (card->netdev->flags & IFF_UP) ) { - mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); + if (!brutal) { + /* switch on tx irqs (while we are still in the interrupt + * handler, so we don't get an interrupt), check again + * for done descriptors. This results in fewer interrupts */ + spider_net_tx_irq_on(card); + status = spider_net_get_descr_status(tx_chain->tail); + switch (status) { + case SPIDER_NET_DESCR_RESPONSE_ERROR: + case SPIDER_NET_DESCR_PROTECTION_ERROR: + case SPIDER_NET_DESCR_FORCE_END: + case SPIDER_NET_DESCR_COMPLETE: + goto again; + default: + break; + } } + } /** @@ -671,22 +726,16 @@ spider_net_cleanup_tx_ring(struct spider_net_card *card) static u8 spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) { + /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9, + * ff:ff:ff:ff:ff:ff must result in 0xfd */ u32 crc; u8 hash; - char addr_for_crc[ETH_ALEN] = { 0, }; - int i, bit; - for (i = 0; i < ETH_ALEN * 8; i++) { - bit = (addr[i / 8] >> (i % 8)) & 1; - addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8)); - } - - crc = crc32_be(~0, addr_for_crc, netdev->addr_len); + crc = crc32_be(~0, addr, netdev->addr_len); hash = (crc >> 27); hash <<= 3; hash |= crc & 7; - hash &= 0xff; return hash; } @@ -772,11 +821,9 @@ spider_net_stop(struct net_device *netdev) { struct spider_net_card *card = netdev_priv(netdev); - tasklet_kill(&card->rxram_full_tl); netif_poll_disable(netdev); netif_carrier_off(netdev); netif_stop_queue(netdev); - del_timer_sync(&card->tx_timer); /* disable/mask all interrupts */ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); @@ -825,15 +872,13 @@ spider_net_get_next_tx_descr(struct spider_net_card *card) * @skb: packet to consider * * fills out the command and status field of the descriptor structure, - * depending on hardware checksum settings. + * depending on hardware checksum settings. This function assumes a wmb() + * has executed before. 
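
For the simplified spider_net_get_multicast_hash() above: the eight-bit table index is assembled from a big-endian CRC-32 of the address, taking the top five bits (crc >> 27) followed by the low three bits. A userspace sketch of that extraction (my_crc32_be is a plain MSB-first CRC-32 with polynomial 0x04C11DB7 standing in for the kernel's crc32_be; whether this simplified hashing meets the expectations recorded in the FIXME is deliberately left open here):

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t my_crc32_be(uint32_t crc, const uint8_t *p, int len)
	{
		int i;

		while (len--) {
			crc ^= (uint32_t)*p++ << 24;
			for (i = 0; i < 8; i++)
				crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x04c11db7u
							  : crc << 1;
		}
		return crc;
	}

	int main(void)
	{
		uint8_t  addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		uint32_t crc     = my_crc32_be(~0u, addr, 6);
		uint8_t  hash    = (uint8_t)(((crc >> 27) << 3) | (crc & 7));

		printf("crc=0x%08x hash=0x%02x\n", (unsigned int)crc, hash);
		return 0;
	}
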
*/ static void spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, struct sk_buff *skb) { - /* make sure the other fields in the descriptor are written */ - wmb(); - if (skb->ip_summed != CHECKSUM_HW) { descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; return; @@ -842,13 +887,14 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, /* is packet ip? * if yes: tcp? udp? */ if (skb->protocol == htons(ETH_P_IP)) { - if (skb->nh.iph->protocol == IPPROTO_TCP) + if (skb->nh.iph->protocol == IPPROTO_TCP) { descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; - else if (skb->nh.iph->protocol == IPPROTO_UDP) + } else if (skb->nh.iph->protocol == IPPROTO_UDP) { descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; - else /* the stack should checksum non-tcp and non-udp - packets on his own: NETIF_F_IP_CSUM */ + } else { /* the stack should checksum non-tcp and non-udp + packets on his own: NETIF_F_IP_CSUM */ descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; + } } } @@ -868,22 +914,23 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, struct spider_net_descr *descr, struct sk_buff *skb) { - dma_addr_t buf; - - buf = pci_map_single(card->pdev, skb->data, - skb->len, PCI_DMA_BIDIRECTIONAL); - if (buf == DMA_ERROR_CODE) { - if (netif_msg_tx_err(card) && net_ratelimit()) + descr->buf_addr = pci_map_single(card->pdev, skb->data, + skb->len, PCI_DMA_BIDIRECTIONAL); + if (descr->buf_addr == DMA_ERROR_CODE) { + if (netif_msg_tx_err(card)) pr_err("could not iommu-map packet (%p, %i). " "Dropping packet\n", skb->data, skb->len); return -ENOMEM; } - descr->buf_addr = buf; descr->buf_size = skb->len; descr->skb = skb; descr->data_status = 0; + /* make sure the above values are in memory before we change the + * status */ + wmb(); + spider_net_set_txdescr_cmdstat(descr,skb); return 0; @@ -925,12 +972,17 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) struct spider_net_descr *descr; int result; - spider_net_release_tx_chain(card, 0); - descr = spider_net_get_next_tx_descr(card); - if (!descr) - goto error; + if (!descr) { + netif_stop_queue(netdev); + + descr = spider_net_get_next_tx_descr(card); + if (!descr) + goto error; + else + netif_start_queue(netdev); + } result = spider_net_prepare_tx_descr(card, descr, skb); if (result) @@ -938,25 +990,19 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) card->tx_chain.head = card->tx_chain.head->next; - if (spider_net_get_descr_status(descr->prev) != - SPIDER_NET_DESCR_CARDOWNED) { - /* make sure the current descriptor is in memory. Then - * kicking it on again makes sense, if the previous is not - * card-owned anymore. 
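
The ordering being discussed here is the usual producer-side contract for a DMA descriptor ring: every payload field must be globally visible before the status word that hands the descriptor to the hardware, and re-checking a neighbour's status needs a barrier between the write and the read. A generic sketch of the write side (layout, field names and the ownership bit are illustrative; __sync_synchronize() stands in for wmb()):

	#include <stdio.h>
	#include <stdint.h>

	struct ring_descr {			/* illustrative layout, not the driver's */
		uint32_t buf_addr;
		uint32_t buf_size;
		volatile uint32_t cmd_status;	/* ownership/status word, written last */
	};

	#define OWNED_BY_HW (1u << 31)		/* illustrative bit position */

	static void hand_to_hw(struct ring_descr *d, uint32_t addr, uint32_t len)
	{
		d->buf_addr = addr;
		d->buf_size = len;
		__sync_synchronize();		/* payload before status, as wmb() does in the driver */
		d->cmd_status = OWNED_BY_HW;
	}

	int main(void)
	{
		struct ring_descr d = { 0 };

		hand_to_hw(&d, 0x1000, 1514);
		printf("cmd_status=0x%08x\n", (unsigned int)d.cmd_status);
		return 0;
	}
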
Check the previous descriptor twice - * to omit an mb() in heavy traffic cases */ - mb(); - if (spider_net_get_descr_status(descr->prev) != - SPIDER_NET_DESCR_CARDOWNED) - spider_net_kick_tx_dma(card, descr); - } + /* make sure the status from spider_net_prepare_tx_descr is in + * memory before we check out the previous descriptor */ + wmb(); - mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); + if (spider_net_get_descr_status(descr->prev) != + SPIDER_NET_DESCR_CARDOWNED) + spider_net_kick_tx_dma(card, descr); return NETDEV_TX_OK; error: card->netdev_stats.tx_dropped++; - return NETDEV_TX_BUSY; + return NETDEV_TX_LOCKED; } /** @@ -981,7 +1027,6 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on * @descr: descriptor to process * @card: card structure - * @napi: whether caller is in NAPI context * * returns 1 on success, 0 if no packet was passed to the stack * @@ -990,7 +1035,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) */ static int spider_net_pass_skb_up(struct spider_net_descr *descr, - struct spider_net_card *card, int napi) + struct spider_net_card *card) { struct sk_buff *skb; struct net_device *netdev; @@ -1001,20 +1046,22 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, netdev = card->netdev; - /* unmap descriptor */ - pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, + /* check for errors in the data_error flag */ + if ((data_error & SPIDER_NET_DATA_ERROR_MASK) && + netif_msg_rx_err(card)) + pr_err("error in received descriptor found, " + "data_status=x%08x, data_error=x%08x\n", + data_status, data_error); + + /* prepare skb, unmap descriptor */ + skb = descr->skb; + pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU, PCI_DMA_BIDIRECTIONAL); /* the cases we'll throw away the packet immediately */ - if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { - if (netif_msg_rx_err(card)) - pr_err("error in received descriptor found, " - "data_status=x%08x, data_error=x%08x\n", - data_status, data_error); + if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) return 0; - } - skb = descr->skb; skb->dev = netdev; skb_put(skb, descr->valid_size); @@ -1026,14 +1073,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, /* checksum offload */ if (card->options.rx_csum) { - if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) == - SPIDER_NET_DATA_STATUS_CKSUM_MASK) && - !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK)) + if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) && + (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) ) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; - } else + } else { skb->ip_summed = CHECKSUM_NONE; + } if (data_status & SPIDER_NET_VLAN_PACKET) { /* further enhancements: HW-accel VLAN @@ -1042,10 +1089,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, } /* pass skb up to stack */ - if (napi) - netif_receive_skb(skb); - else - netif_rx_ni(skb); + netif_receive_skb(skb); /* update netdevice statistics */ card->netdev_stats.rx_packets++; @@ -1055,18 +1099,16 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, } /** - * spider_net_decode_one_descr - processes an rx descriptor + * spider_net_decode_descr - processes an rx descriptor * @card: card structure - * @napi: whether caller is in NAPI context * * returns 1 if a packet has been sent to the stack, otherwise 0 * * processes an rx descriptor by iommu-unmapping the data buffer and passing - * the 
packet up to the stack. This function is called in softirq - * context, e.g. either bottom half from interrupt or NAPI polling context + * the packet up to the stack */ static int -spider_net_decode_one_descr(struct spider_net_card *card, int napi) +spider_net_decode_one_descr(struct spider_net_card *card) { enum spider_net_descr_status status; struct spider_net_descr *descr; @@ -1080,19 +1122,17 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) if (status == SPIDER_NET_DESCR_CARDOWNED) { /* nothing in the descriptor yet */ - result=0; - goto out; + return 0; } if (status == SPIDER_NET_DESCR_NOT_IN_USE) { - /* not initialized yet, the ring must be empty */ + /* not initialized yet, I bet chain->tail == chain->head + * and the ring is empty */ spider_net_refill_rx_chain(card); - spider_net_enable_rxdmac(card); - result=0; - goto out; + return 0; } - /* descriptor definitively used -- move on tail */ + /* descriptor definitively used -- move on head */ chain->tail = descr->next; result = 0; @@ -1103,9 +1143,6 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) pr_err("%s: dropping RX descriptor with state %d\n", card->netdev->name, status); card->netdev_stats.rx_dropped++; - pci_unmap_single(card->pdev, descr->buf_addr, - SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); - dev_kfree_skb_irq(descr->skb); goto refill; } @@ -1118,13 +1155,12 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) } /* ok, we've got a packet in descr */ - result = spider_net_pass_skb_up(descr, card, napi); + result = spider_net_pass_skb_up(descr, card); refill: spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); /* change the descriptor state: */ - if (!napi) - spider_net_refill_rx_chain(card); -out: + spider_net_refill_rx_chain(card); + return result; } @@ -1150,7 +1186,7 @@ spider_net_poll(struct net_device *netdev, int *budget) packets_to_do = min(*budget, netdev->quota); while (packets_to_do) { - if (spider_net_decode_one_descr(card, 1)) { + if (spider_net_decode_one_descr(card)) { packets_done++; packets_to_do--; } else { @@ -1162,7 +1198,6 @@ spider_net_poll(struct net_device *netdev, int *budget) netdev->quota -= packets_done; *budget -= packets_done; - spider_net_refill_rx_chain(card); /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ @@ -1306,24 +1341,6 @@ spider_net_enable_txdmac(struct spider_net_card *card) card->tx_chain.tail->bus_addr); } -/** - * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt - * @card: card structure - * - * spider_net_handle_rxram_full empties the RX ring so that spider can put - * more packets in it and empty its RX RAM. 
This is called in bottom half - * context - */ -static void -spider_net_handle_rxram_full(struct spider_net_card *card) -{ - while (spider_net_decode_one_descr(card, 0)) - ; - spider_net_enable_rxchtails(card); - spider_net_enable_rxdmac(card); - netif_rx_schedule(card->netdev); -} - /** * spider_net_handle_error_irq - handles errors raised by an interrupt * @card: card structure @@ -1432,21 +1449,17 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) switch (i) { case SPIDER_NET_GTMFLLINT: - if (netif_msg_intr(card) && net_ratelimit()) + if (netif_msg_intr(card)) pr_err("Spider TX RAM full\n"); show_error = 0; break; - case SPIDER_NET_GRFDFLLINT: /* fallthrough */ - case SPIDER_NET_GRFCFLLINT: /* fallthrough */ - case SPIDER_NET_GRFBFLLINT: /* fallthrough */ - case SPIDER_NET_GRFAFLLINT: /* fallthrough */ case SPIDER_NET_GRMFLLINT: - if (netif_msg_intr(card) && net_ratelimit()) + if (netif_msg_intr(card)) pr_err("Spider RX RAM full, incoming packets " - "might be discarded!\n"); - spider_net_rx_irq_off(card); - tasklet_schedule(&card->rxram_full_tl); - show_error = 0; + "might be discarded !\n"); + netif_rx_schedule(card->netdev); + spider_net_enable_rxchtails(card); + spider_net_enable_rxdmac(card); break; /* case SPIDER_NET_GTMSHTINT: problem, print a message */ @@ -1454,6 +1467,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) /* allrighty. tx from previous descr ok */ show_error = 0; break; + /* case SPIDER_NET_GRFDFLLINT: print a message down there */ + /* case SPIDER_NET_GRFCFLLINT: print a message down there */ + /* case SPIDER_NET_GRFBFLLINT: print a message down there */ + /* case SPIDER_NET_GRFAFLLINT: print a message down there */ /* chain end */ case SPIDER_NET_GDDDCEINT: /* fallthrough */ @@ -1465,7 +1482,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) "restarting DMAC %c.\n", 'D'+i-SPIDER_NET_GDDDCEINT); spider_net_refill_rx_chain(card); - spider_net_enable_rxdmac(card); show_error = 0; break; @@ -1476,7 +1492,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) case SPIDER_NET_GDAINVDINT: /* could happen when rx chain is full */ spider_net_refill_rx_chain(card); - spider_net_enable_rxdmac(card); show_error = 0; break; @@ -1565,13 +1580,17 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs) if (!status_reg) return IRQ_NONE; + if (status_reg & SPIDER_NET_TXINT) + spider_net_release_tx_chain(card, 0); + if (status_reg & SPIDER_NET_RXINT ) { spider_net_rx_irq_off(card); netif_rx_schedule(netdev); } - if (status_reg & SPIDER_NET_ERRINT ) - spider_net_handle_error_irq(card, status_reg); + /* we do this after rx and tx processing, as we want the tx chain + * processed to see, whether we should restart tx dma processing */ + spider_net_handle_error_irq(card, status_reg); /* clear interrupt sources */ spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); @@ -1812,40 +1831,34 @@ spider_net_setup_phy(struct spider_net_card *card) /** * spider_net_download_firmware - loads firmware into the adapter * @card: card structure - * @firmware_ptr: pointer to firmware data + * @firmware: firmware pointer * - * spider_net_download_firmware loads the firmware data into the - * adapter. It assumes the length etc. to be allright. + * spider_net_download_firmware loads the firmware opened by + * spider_net_init_firmware into the adapter. 
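
spider_net_init_firmware() below expects the image returned by request_firmware() to hold six sequencer programs of SPIDER_NET_FIRMWARE_LEN (1024) 32-bit words each, so the size check amounts to 6 * 1024 * 4 = 24576 bytes. A standalone restatement of that arithmetic (the macro names are illustrative, chosen to mirror the constants used below):

	#include <stdio.h>
	#include <stdint.h>

	#define FIRMWARE_SEQUENCERS	6
	#define FIRMWARE_SEQ_WORDS	1024	/* 32-bit words per sequencer program */

	int main(void)
	{
		unsigned long expected = FIRMWARE_SEQUENCERS * FIRMWARE_SEQ_WORDS * sizeof(uint32_t);

		printf("expected spider_fw.bin size: %lu bytes\n", expected);
		return 0;
	}
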
*/ -static int +static void spider_net_download_firmware(struct spider_net_card *card, - u8 *firmware_ptr) + const struct firmware *firmware) { int sequencer, i; - u32 *fw_ptr = (u32 *)firmware_ptr; + u32 *fw_ptr = (u32 *)firmware->data; /* stop sequencers */ spider_net_write_reg(card, SPIDER_NET_GSINIT, SPIDER_NET_STOP_SEQ_VALUE); - for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; - sequencer++) { + for (sequencer = 0; sequencer < 6; sequencer++) { spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + sequencer * 8, 0); - for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { + for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + sequencer * 8, *fw_ptr); fw_ptr++; } } - if (spider_net_read_reg(card, SPIDER_NET_GSINIT)) - return -EIO; - spider_net_write_reg(card, SPIDER_NET_GSINIT, SPIDER_NET_RUN_SEQ_VALUE); - - return 0; } /** @@ -1877,53 +1890,31 @@ spider_net_download_firmware(struct spider_net_card *card, static int spider_net_init_firmware(struct spider_net_card *card) { - struct firmware *firmware = NULL; - struct device_node *dn; - u8 *fw_prop = NULL; - int err = -ENOENT; - int fw_size; - - if (request_firmware((const struct firmware **)&firmware, - SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) { - if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) && - netif_msg_probe(card) ) { - pr_err("Incorrect size of spidernet firmware in " \ - "filesystem. Looking in host firmware...\n"); - goto try_host_fw; - } - err = spider_net_download_firmware(card, firmware->data); - - release_firmware(firmware); - if (err) - goto try_host_fw; + const struct firmware *firmware; + int err = -EIO; - goto done; + if (request_firmware(&firmware, + SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) { + if (netif_msg_probe(card)) + pr_err("Couldn't read in sequencer data file %s.\n", + SPIDER_NET_FIRMWARE_NAME); + firmware = NULL; + goto out; } -try_host_fw: - dn = pci_device_to_OF_node(card->pdev); - if (!dn) - goto out_err; - - fw_prop = (u8 *)get_property(dn, "firmware", &fw_size); - if (!fw_prop) - goto out_err; - - if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) && - netif_msg_probe(card) ) { - pr_err("Incorrect size of spidernet firmware in " \ - "host firmware\n"); - goto done; + if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) { + if (netif_msg_probe(card)) + pr_err("Invalid size of sequencer data file %s.\n", + SPIDER_NET_FIRMWARE_NAME); + goto out; } - err = spider_net_download_firmware(card, fw_prop); + spider_net_download_firmware(card, firmware); + + err = 0; +out: + release_firmware(firmware); -done: - return err; -out_err: - if (netif_msg_probe(card)) - pr_err("Couldn't find spidernet firmware in filesystem " \ - "or host firmware\n"); return err; } @@ -1943,11 +1934,10 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) SPIDER_NET_CKRCTRL_RUN_VALUE); /* empty sequencer data */ - for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; - sequencer++) { + for (sequencer = 0; sequencer < 6; sequencer++) { spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + sequencer * 8, 0x0); - for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { + for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + sequencer * 8, 0x0); } @@ -2071,15 +2061,7 @@ spider_net_setup_netdev(struct spider_net_card *card) SET_NETDEV_DEV(netdev, &card->pdev->dev); pci_set_drvdata(card->pdev, netdev); - - atomic_set(&card->tx_chain_release,0); - card->rxram_full_tl.data = (unsigned long) card; - card->rxram_full_tl.func = - 
(void (*)(unsigned long)) spider_net_handle_rxram_full; - init_timer(&card->tx_timer); - card->tx_timer.function = - (void (*)(unsigned long)) spider_net_cleanup_tx_ring; - card->tx_timer.data = (unsigned long) card; + spin_lock_init(&card->intmask_lock); netdev->irq = card->pdev->irq; card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; diff --git a/trunk/drivers/net/spider_net.h b/trunk/drivers/net/spider_net.h index 5922b529a048..22b2f2347351 100644 --- a/trunk/drivers/net/spider_net.h +++ b/trunk/drivers/net/spider_net.h @@ -33,32 +33,25 @@ extern struct ethtool_ops spider_net_ethtool_ops; extern char spider_net_driver_name[]; -#define SPIDER_NET_MAX_FRAME 2312 -#define SPIDER_NET_MAX_MTU 2294 +#define SPIDER_NET_MAX_MTU 2308 #define SPIDER_NET_MIN_MTU 64 #define SPIDER_NET_RXBUF_ALIGN 128 -#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256 +#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64 #define SPIDER_NET_RX_DESCRIPTORS_MIN 16 -#define SPIDER_NET_RX_DESCRIPTORS_MAX 512 +#define SPIDER_NET_RX_DESCRIPTORS_MAX 256 -#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256 +#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64 #define SPIDER_NET_TX_DESCRIPTORS_MIN 16 -#define SPIDER_NET_TX_DESCRIPTORS_MAX 512 - -#define SPIDER_NET_TX_TIMER 20 +#define SPIDER_NET_TX_DESCRIPTORS_MAX 256 #define SPIDER_NET_RX_CSUM_DEFAULT 1 -#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ -#define SPIDER_NET_NAPI_WEIGHT 64 +#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ +#define SPIDER_NET_NAPI_WEIGHT 64 -#define SPIDER_NET_FIRMWARE_SEQS 6 -#define SPIDER_NET_FIRMWARE_SEQWORDS 1024 -#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \ - SPIDER_NET_FIRMWARE_SEQWORDS * \ - sizeof(u32)) +#define SPIDER_NET_FIRMWARE_LEN 1024 #define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin" /** spider_net SMMIO registers */ @@ -149,12 +142,14 @@ extern char spider_net_driver_name[]; /** SCONFIG registers */ #define SPIDER_NET_SCONFIG_IOACTE 0x00002810 -/** interrupt mask registers */ -#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7 -#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7 +/** hardcoded register values */ +#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff +#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff /* no MAC aborts -> auto retransmission */ -#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1 +#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1 +/* clear counter when interrupt sources are cleared +#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */ /* we rely on flagged descriptor interrupts */ #define SPIDER_NET_FRAMENUM_VALUE 0x00000000 /* set this first, then the FRAMENUM_VALUE */ @@ -173,7 +168,7 @@ extern char spider_net_driver_name[]; #if 0 #define SPIDER_NET_WOL_VALUE 0x00000000 #endif -#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71 +#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8 /* pause frames: automatic, no upper retransmission count */ /* outside loopback mode: ETOMOD signal dont matter, not connected */ @@ -323,10 +318,6 @@ enum spider_net_int2_status { #define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \ (1 << SPIDER_NET_GRMFLLINT) ) -#define SPIDER_NET_ERRINT ( 0xffffffff & \ - (~SPIDER_NET_TXINT) & \ - (~SPIDER_NET_RXINT) ) - #define SPIDER_NET_GPREXEC 0x80000000 #define SPIDER_NET_GPRDAT_MASK 0x0000ffff @@ -367,6 +358,9 @@ enum spider_net_int2_status { /* descr ready, descr is in middle of chain, get interrupt on completion */ #define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 +/* multicast is no problem */ +#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff + enum spider_net_descr_status { SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ 
SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ @@ -379,9 +373,9 @@ enum spider_net_descr_status { struct spider_net_descr { /* as defined by the hardware */ - u32 buf_addr; + dma_addr_t buf_addr; u32 buf_size; - u32 next_descr_addr; + dma_addr_t next_descr_addr; u32 dmac_cmd_status; u32 result_size; u32 valid_size; /* all zeroes for tx */ @@ -390,7 +384,7 @@ struct spider_net_descr { /* used in the driver */ struct sk_buff *skb; - u32 bus_addr; + dma_addr_t bus_addr; struct spider_net_descr *next; struct spider_net_descr *prev; } __attribute__((aligned(32))); @@ -402,21 +396,21 @@ struct spider_net_descr_chain { }; /* descriptor data_status bits */ -#define SPIDER_NET_RX_IPCHK 29 -#define SPIDER_NET_RX_TCPCHK 28 +#define SPIDER_NET_RXIPCHK 29 +#define SPIDER_NET_TCPUDPIPCHK 28 +#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \ + 1 << SPIDER_NET_TCPUDPIPCHK) + #define SPIDER_NET_VLAN_PACKET 21 -#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \ - (1 << SPIDER_NET_RX_TCPCHK) ) /* descriptor data_error bits */ -#define SPIDER_NET_RX_IPCHKERR 27 -#define SPIDER_NET_RX_RXTCPCHKERR 28 - -#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR) +#define SPIDER_NET_RXIPCHKERR 27 +#define SPIDER_NET_RXTCPCHKERR 26 +#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \ + 1 << SPIDER_NET_RXTCPCHKERR) -/* the cases we don't pass the packet to the stack. - * 701b8000 would be correct, but every packets gets that flag */ -#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000 +/* the cases we don't pass the packet to the stack */ +#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000 #define SPIDER_NET_DESCR_SIZE 32 @@ -451,16 +445,13 @@ struct spider_net_card { struct spider_net_descr_chain tx_chain; struct spider_net_descr_chain rx_chain; - atomic_t rx_chain_refill; - atomic_t tx_chain_release; + spinlock_t chain_lock; struct net_device_stats netdev_stats; struct spider_net_options options; spinlock_t intmask_lock; - struct tasklet_struct rxram_full_tl; - struct timer_list tx_timer; struct work_struct tx_timeout_task; atomic_t tx_timeout_task_counter; diff --git a/trunk/drivers/net/spider_net_ethtool.c b/trunk/drivers/net/spider_net_ethtool.c index a5bb0b7633af..d42e60ba74ce 100644 --- a/trunk/drivers/net/spider_net_ethtool.c +++ b/trunk/drivers/net/spider_net_ethtool.c @@ -113,23 +113,6 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n) return 0; } -static uint32_t -spider_net_ethtool_get_tx_csum(struct net_device *netdev) -{ - return (netdev->features & NETIF_F_HW_CSUM) != 0; -} - -static int -spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data) -{ - if (data) - netdev->features |= NETIF_F_HW_CSUM; - else - netdev->features &= ~NETIF_F_HW_CSUM; - - return 0; -} - struct ethtool_ops spider_net_ethtool_ops = { .get_settings = spider_net_ethtool_get_settings, .get_drvinfo = spider_net_ethtool_get_drvinfo, @@ -139,7 +122,5 @@ struct ethtool_ops spider_net_ethtool_ops = { .nway_reset = spider_net_ethtool_nway_reset, .get_rx_csum = spider_net_ethtool_get_rx_csum, .set_rx_csum = spider_net_ethtool_set_rx_csum, - .get_tx_csum = spider_net_ethtool_get_tx_csum, - .set_tx_csum = spider_net_ethtool_set_tx_csum, }; diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index f2d1dafde087..eb86b059809b 100644 --- a/trunk/drivers/net/tg3.c +++ b/trunk/drivers/net/tg3.c @@ -69,8 +69,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.48" -#define 
DRV_MODULE_RELDATE "Jan 16, 2006" +#define DRV_MODULE_VERSION "3.47" +#define DRV_MODULE_RELDATE "Dec 28, 2005" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -1325,12 +1325,10 @@ static int tg3_set_power_state(struct tg3 *tp, int state) val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); tw32(0x7d00, val); if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { - int err; - - err = tg3_nvram_lock(tp); + tg3_nvram_lock(tp); tg3_halt_cpu(tp, RX_CPU_BASE); - if (!err) - tg3_nvram_unlock(tp); + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0); + tg3_nvram_unlock(tp); } } @@ -4195,19 +4193,14 @@ static int tg3_nvram_lock(struct tg3 *tp) if (tp->tg3_flags & TG3_FLAG_NVRAM) { int i; - if (tp->nvram_lock_cnt == 0) { - tw32(NVRAM_SWARB, SWARB_REQ_SET1); - for (i = 0; i < 8000; i++) { - if (tr32(NVRAM_SWARB) & SWARB_GNT1) - break; - udelay(20); - } - if (i == 8000) { - tw32(NVRAM_SWARB, SWARB_REQ_CLR1); - return -ENODEV; - } + tw32(NVRAM_SWARB, SWARB_REQ_SET1); + for (i = 0; i < 8000; i++) { + if (tr32(NVRAM_SWARB) & SWARB_GNT1) + break; + udelay(20); } - tp->nvram_lock_cnt++; + if (i == 8000) + return -ENODEV; } return 0; } @@ -4215,12 +4208,8 @@ static int tg3_nvram_lock(struct tg3 *tp) /* tp->lock is held. */ static void tg3_nvram_unlock(struct tg3 *tp) { - if (tp->tg3_flags & TG3_FLAG_NVRAM) { - if (tp->nvram_lock_cnt > 0) - tp->nvram_lock_cnt--; - if (tp->nvram_lock_cnt == 0) - tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); - } + if (tp->tg3_flags & TG3_FLAG_NVRAM) + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); } /* tp->lock is held. */ @@ -4331,13 +4320,8 @@ static int tg3_chip_reset(struct tg3 *tp) void (*write_op)(struct tg3 *, u32, u32); int i; - if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { + if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) tg3_nvram_lock(tp); - /* No matching tg3_nvram_unlock() after this because - * chip reset below will undo the nvram lock. - */ - tp->nvram_lock_cnt = 0; - } /* * We must avoid the readl() that normally takes place. @@ -4733,10 +4717,6 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) (offset == RX_CPU_BASE ? "RX" : "TX")); return -ENODEV; } - - /* Clear firmware's nvram arbitration. */ - if (tp->tg3_flags & TG3_FLAG_NVRAM) - tw32(NVRAM_SWARB, SWARB_REQ_CLR0); return 0; } @@ -4756,7 +4736,7 @@ struct fw_info { static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, int cpu_scratch_size, struct fw_info *info) { - int err, lock_err, i; + int err, i; void (*write_op)(struct tg3 *, u32, u32); if (cpu_base == TX_CPU_BASE && @@ -4775,10 +4755,9 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b /* It is possible that bootcode is still loading at this point. * Get the nvram lock first before halting the cpu. 
*/ - lock_err = tg3_nvram_lock(tp); + tg3_nvram_lock(tp); err = tg3_halt_cpu(tp, cpu_base); - if (!lock_err) - tg3_nvram_unlock(tp); + tg3_nvram_unlock(tp); if (err) goto out; @@ -8203,7 +8182,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, data[1] = 1; } if (etest->flags & ETH_TEST_FL_OFFLINE) { - int err, irq_sync = 0; + int irq_sync = 0; if (netif_running(dev)) { tg3_netif_stop(tp); @@ -8213,12 +8192,11 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, tg3_full_lock(tp, irq_sync); tg3_halt(tp, RESET_KIND_SUSPEND, 1); - err = tg3_nvram_lock(tp); + tg3_nvram_lock(tp); tg3_halt_cpu(tp, RX_CPU_BASE); if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tg3_halt_cpu(tp, TX_CPU_BASE); - if (!err) - tg3_nvram_unlock(tp); + tg3_nvram_unlock(tp); if (tg3_test_registers(tp) != 0) { etest->flags |= ETH_TEST_FL_FAILED; @@ -8610,11 +8588,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { tp->tg3_flags |= TG3_FLAG_NVRAM; - if (tg3_nvram_lock(tp)) { - printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, " - "tg3_nvram_init failed.\n", tp->dev->name); - return; - } + tg3_nvram_lock(tp); tg3_enable_nvram_access(tp); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) @@ -8712,9 +8686,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) if (offset > NVRAM_ADDR_MSK) return -EINVAL; - ret = tg3_nvram_lock(tp); - if (ret) - return ret; + tg3_nvram_lock(tp); tg3_enable_nvram_access(tp); @@ -8813,6 +8785,10 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, offset = offset + (pagesize - page_off); + /* Nvram lock released by tg3_nvram_read() above, + * so need to get it again. + */ + tg3_nvram_lock(tp); tg3_enable_nvram_access(tp); /* @@ -8949,9 +8925,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) else { u32 grc_mode; - ret = tg3_nvram_lock(tp); - if (ret) - return ret; + tg3_nvram_lock(tp); tg3_enable_nvram_access(tp); if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && diff --git a/trunk/drivers/net/tg3.h b/trunk/drivers/net/tg3.h index e8243305f0e8..890e1635996b 100644 --- a/trunk/drivers/net/tg3.h +++ b/trunk/drivers/net/tg3.h @@ -2275,7 +2275,6 @@ struct tg3 { dma_addr_t stats_mapping; struct work_struct reset_task; - int nvram_lock_cnt; u32 nvram_size; u32 nvram_pagesize; u32 nvram_jedecnum; diff --git a/trunk/drivers/net/wireless/airo.c b/trunk/drivers/net/wireless/airo.c index a4c7ae94614d..ee866fd6957d 100644 --- a/trunk/drivers/net/wireless/airo.c +++ b/trunk/drivers/net/wireless/airo.c @@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev, int channel = fwrq->m; /* We should do a better check than that, * based on the card capability !!! */ - if((channel < 1) || (channel > 14)) { + if((channel < 1) || (channel > 16)) { printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m); rc = -EINVAL; } else { readConfigRid(local, 1); /* Yes ! We can set it !!! 
*/ - local->config.channelSet = (u16) channel; + local->config.channelSet = (u16)(channel - 1); set_bit (FLAG_COMMIT, &local->flags); } } @@ -5692,7 +5692,6 @@ static int airo_get_freq(struct net_device *dev, { struct airo_info *local = dev->priv; StatusRid status_rid; /* Card status info */ - int ch; readConfigRid(local, 1); if ((local->config.opmode & 0xFF) == MODE_STA_ESS) @@ -5700,14 +5699,16 @@ static int airo_get_freq(struct net_device *dev, else readStatusRid(local, &status_rid, 1); - ch = (int)status_rid.channel; - if((ch > 0) && (ch < 15)) { - fwrq->m = frequency_list[ch - 1] * 100000; +#ifdef WEXT_USECHANNELS + fwrq->m = ((int)status_rid.channel) + 1; + fwrq->e = 0; +#else + { + int f = (int)status_rid.channel; + fwrq->m = frequency_list[f] * 100000; fwrq->e = 1; - } else { - fwrq->m = ch; - fwrq->e = 0; } +#endif return 0; } @@ -5782,7 +5783,7 @@ static int airo_get_essid(struct net_device *dev, /* If none, we may want to get the one that was set */ /* Push it out ! */ - dwrq->length = status_rid.SSIDlen; + dwrq->length = status_rid.SSIDlen + 1; dwrq->flags = 1; /* active */ return 0; diff --git a/trunk/drivers/net/wireless/atmel.c b/trunk/drivers/net/wireless/atmel.c index 98a76f10a0f7..f0ccfef66445 100644 --- a/trunk/drivers/net/wireless/atmel.c +++ b/trunk/drivers/net/wireless/atmel.c @@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev, if (priv->new_SSID_size != 0) { memcpy(extra, priv->new_SSID, priv->new_SSID_size); extra[priv->new_SSID_size] = '\0'; - dwrq->length = priv->new_SSID_size; + dwrq->length = priv->new_SSID_size + 1; } else { memcpy(extra, priv->SSID, priv->SSID_size); extra[priv->SSID_size] = '\0'; - dwrq->length = priv->SSID_size; + dwrq->length = priv->SSID_size + 1; } dwrq->flags = !priv->connect_to_any_BSS; /* active */ diff --git a/trunk/drivers/net/wireless/hostap/Kconfig b/trunk/drivers/net/wireless/hostap/Kconfig index c8f6286dd35f..56f41c714d38 100644 --- a/trunk/drivers/net/wireless/hostap/Kconfig +++ b/trunk/drivers/net/wireless/hostap/Kconfig @@ -26,25 +26,11 @@ config HOSTAP_FIRMWARE depends on HOSTAP ---help--- Configure Host AP driver to include support for firmware image - download. This option by itself only enables downloading to the - volatile memory, i.e. the card RAM. This option is required to - support cards that don't have firmware in flash, such as D-Link - DWL-520 rev E and D-Link DWL-650 rev P. + download. Current version supports only downloading to volatile, i.e., + RAM memory. Flash upgrade is not yet supported. - Firmware image downloading needs a user space tool, prism2_srec. - It is available from http://hostap.epitest.fi/. - -config HOSTAP_FIRMWARE_NVRAM - bool "Support for non-volatile firmware download" - depends on HOSTAP_FIRMWARE - ---help--- - Allow Host AP driver to write firmware images to the non-volatile - card memory, i.e. flash memory that survives power cycling. - Enable this option if you want to be able to change card firmware - permanently. - - Firmware image downloading needs a user space tool, prism2_srec. - It is available from http://hostap.epitest.fi/. + Firmware image downloading needs user space tool, prism2_srec. It is + available from http://hostap.epitest.fi/. 
config HOSTAP_PLX tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors" diff --git a/trunk/drivers/net/wireless/hostap/Makefile b/trunk/drivers/net/wireless/hostap/Makefile index b8e41a702c00..353ccb93134b 100644 --- a/trunk/drivers/net/wireless/hostap/Makefile +++ b/trunk/drivers/net/wireless/hostap/Makefile @@ -1,5 +1,4 @@ -hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \ - hostap_ioctl.o hostap_main.o hostap_proc.o +hostap-y := hostap_main.o obj-$(CONFIG_HOSTAP) += hostap.o obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o diff --git a/trunk/drivers/net/wireless/hostap/hostap.h b/trunk/drivers/net/wireless/hostap/hostap.h index 5e63765219fe..5fac89b8ce3a 100644 --- a/trunk/drivers/net/wireless/hostap/hostap.h +++ b/trunk/drivers/net/wireless/hostap/hostap.h @@ -1,15 +1,6 @@ #ifndef HOSTAP_H #define HOSTAP_H -#include - -#include "hostap_wlan.h" -#include "hostap_ap.h" - -static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, - 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; -#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0])) - /* hostap.c */ extern struct proc_dir_entry *hostap_proc; @@ -49,26 +40,6 @@ int prism2_update_comms_qual(struct net_device *dev); int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, u8 *body, size_t bodylen); int prism2_sta_deauth(local_info_t *local, u16 reason); -int prism2_wds_add(local_info_t *local, u8 *remote_addr, - int rtnl_locked); -int prism2_wds_del(local_info_t *local, u8 *remote_addr, - int rtnl_locked, int do_not_remove); - - -/* hostap_ap.c */ - -int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac); -int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac); -void ap_control_flush_macs(struct mac_restrictions *mac_restrictions); -int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac); -void ap_control_kickall(struct ap_data *ap); -void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, - struct ieee80211_crypt_data ***crypt); -int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], - struct iw_quality qual[], int buf_size, - int aplist); -int prism2_ap_translate_scan(struct net_device *dev, char *buffer); -int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param); /* hostap_proc.c */ @@ -83,12 +54,4 @@ void hostap_info_init(local_info_t *local); void hostap_info_process(local_info_t *local, struct sk_buff *skb); -/* hostap_ioctl.c */ - -extern const struct iw_handler_def hostap_iw_handler_def; -extern struct ethtool_ops prism2_ethtool_ops; - -int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); - - #endif /* HOSTAP_H */ diff --git a/trunk/drivers/net/wireless/hostap/hostap_80211.h b/trunk/drivers/net/wireless/hostap/hostap_80211.h index 1fc72fe511e9..bf506f50d722 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_80211.h +++ b/trunk/drivers/net/wireless/hostap/hostap_80211.h @@ -1,9 +1,6 @@ #ifndef HOSTAP_80211_H #define HOSTAP_80211_H -#include -#include - struct hostap_ieee80211_mgmt { u16 frame_control; u16 duration; diff --git a/trunk/drivers/net/wireless/hostap/hostap_80211_rx.c b/trunk/drivers/net/wireless/hostap/hostap_80211_rx.c index 7e04dc94b3bc..4b13b76425c1 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/trunk/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -1,18 +1,7 @@ #include -#include #include "hostap_80211.h" #include "hostap.h" -#include "hostap_ap.h" - -/* See IEEE 802.1H for LLC/SNAP 
encapsulation/decapsulation */ -/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ -static unsigned char rfc1042_header[] = -{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; -/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ -static unsigned char bridge_tunnel_header[] = -{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; -/* No encapsulation header if EtherType < 0x600 (=length) */ void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) diff --git a/trunk/drivers/net/wireless/hostap/hostap_80211_tx.c b/trunk/drivers/net/wireless/hostap/hostap_80211_tx.c index 4a85e63906f1..9d24f8a38ac5 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/trunk/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -1,18 +1,3 @@ -#include "hostap_80211.h" -#include "hostap_common.h" -#include "hostap_wlan.h" -#include "hostap.h" -#include "hostap_ap.h" - -/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ -/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ -static unsigned char rfc1042_header[] = -{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; -/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ -static unsigned char bridge_tunnel_header[] = -{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; -/* No encapsulation header if EtherType < 0x600 (=length) */ - void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) { struct ieee80211_hdr_4addr *hdr; diff --git a/trunk/drivers/net/wireless/hostap/hostap_ap.c b/trunk/drivers/net/wireless/hostap/hostap_ap.c index 753a1de6664b..9da94ab7f05f 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_ap.c +++ b/trunk/drivers/net/wireless/hostap/hostap_ap.c @@ -16,14 +16,6 @@ * (8802.11: 5.5) */ -#include -#include -#include - -#include "hostap_wlan.h" -#include "hostap.h" -#include "hostap_ap.h" - static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL, DEF_INTS }; module_param_array(other_ap_policy, int, NULL, 0444); @@ -368,7 +360,8 @@ static int ap_control_proc_read(char *page, char **start, off_t off, } -int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac) +static int ap_control_add_mac(struct mac_restrictions *mac_restrictions, + u8 *mac) { struct mac_entry *entry; @@ -387,7 +380,8 @@ int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac) } -int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac) +static int ap_control_del_mac(struct mac_restrictions *mac_restrictions, + u8 *mac) { struct list_head *ptr; struct mac_entry *entry; @@ -439,7 +433,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions, } -void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) +static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) { struct list_head *ptr, *n; struct mac_entry *entry; @@ -460,7 +454,8 @@ void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) } -int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac) +static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, + u8 *mac) { struct sta_info *sta; u16 resp; @@ -491,7 +486,7 @@ int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac) #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ -void ap_control_kickall(struct ap_data *ap) +static void ap_control_kickall(struct ap_data *ap) { struct list_head *ptr, *n; struct sta_info *sta; @@ -2326,9 +2321,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) } -int 
prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], - struct iw_quality qual[], int buf_size, - int aplist) +static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], + struct iw_quality qual[], int buf_size, + int aplist) { struct ap_data *ap = local->ap; struct list_head *ptr; @@ -2368,7 +2363,7 @@ int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], /* Translate our list of Access Points & Stations to a card independant * format that the Wireless Tools will understand - Jean II */ -int prism2_ap_translate_scan(struct net_device *dev, char *buffer) +static int prism2_ap_translate_scan(struct net_device *dev, char *buffer) { struct hostap_interface *iface; local_info_t *local; @@ -2613,7 +2608,8 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap, } -int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param) +static int prism2_hostapd(struct ap_data *ap, + struct prism2_hostapd_param *param) { switch (param->cmd) { case PRISM2_HOSTAPD_FLUSH: @@ -3211,8 +3207,8 @@ void hostap_update_rates(local_info_t *local) } -void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, - struct ieee80211_crypt_data ***crypt) +static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, + struct ieee80211_crypt_data ***crypt) { struct sta_info *sta; diff --git a/trunk/drivers/net/wireless/hostap/hostap_ap.h b/trunk/drivers/net/wireless/hostap/hostap_ap.h index 2fa2452b6b07..6d00df69c2e3 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_ap.h +++ b/trunk/drivers/net/wireless/hostap/hostap_ap.h @@ -1,8 +1,6 @@ #ifndef HOSTAP_AP_H #define HOSTAP_AP_H -#include "hostap_80211.h" - /* AP data structures for STAs */ /* maximum number of frames to buffer per STA */ diff --git a/trunk/drivers/net/wireless/hostap/hostap_common.h b/trunk/drivers/net/wireless/hostap/hostap_common.h index 01624005d808..6f4fa9dc308f 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_common.h +++ b/trunk/drivers/net/wireless/hostap/hostap_common.h @@ -1,9 +1,6 @@ #ifndef HOSTAP_COMMON_H #define HOSTAP_COMMON_H -#include -#include - #define BIT(x) (1 << (x)) #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] diff --git a/trunk/drivers/net/wireless/hostap/hostap_config.h b/trunk/drivers/net/wireless/hostap/hostap_config.h index c090a5aebb58..7ed3425d08c1 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_config.h +++ b/trunk/drivers/net/wireless/hostap/hostap_config.h @@ -21,10 +21,15 @@ #define PRISM2_DOWNLOAD_SUPPORT #endif -/* Allow kernel configuration to enable non-volatile download support. */ -#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM -#define PRISM2_NON_VOLATILE_DOWNLOAD -#endif +#ifdef PRISM2_DOWNLOAD_SUPPORT +/* Allow writing firmware images into flash, i.e., to non-volatile storage. + * Before you enable this option, you should make absolutely sure that you are + * using prism2_srec utility that comes with THIS version of the driver! + * In addition, please note that it is possible to kill your card with + * non-volatile download if you are using incorrect image. This feature has not + * been fully tested, so please be careful with it. */ +/* #define PRISM2_NON_VOLATILE_DOWNLOAD */ +#endif /* PRISM2_DOWNLOAD_SUPPORT */ /* Save low-level I/O for debugging. This should not be enabled in normal use. 
*/ diff --git a/trunk/drivers/net/wireless/hostap/hostap_info.c b/trunk/drivers/net/wireless/hostap/hostap_info.c index 50f72d831cf4..5aa998fdf1c4 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_info.c +++ b/trunk/drivers/net/wireless/hostap/hostap_info.c @@ -1,8 +1,5 @@ /* Host AP driver Info Frame processing (part of hostap.o module) */ -#include "hostap_wlan.h" -#include "hostap.h" -#include "hostap_ap.h" /* Called only as a tasklet (software IRQ) */ static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf, diff --git a/trunk/drivers/net/wireless/hostap/hostap_ioctl.c b/trunk/drivers/net/wireless/hostap/hostap_ioctl.c index f3e0ce1ee037..2617d70bcda9 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/trunk/drivers/net/wireless/hostap/hostap_ioctl.c @@ -1,13 +1,11 @@ /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ -#include +#ifdef in_atomic +/* Get kernel_locked() for in_atomic() */ #include +#endif #include -#include -#include "hostap_wlan.h" -#include "hostap.h" -#include "hostap_ap.h" static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) { @@ -3912,7 +3910,7 @@ static void prism2_get_drvinfo(struct net_device *dev, local->sta_fw_ver & 0xff); } -struct ethtool_ops prism2_ethtool_ops = { +static struct ethtool_ops prism2_ethtool_ops = { .get_drvinfo = prism2_get_drvinfo }; @@ -3987,7 +3985,7 @@ static const iw_handler prism2_private_handler[] = (iw_handler) prism2_ioctl_priv_readmif, /* 3 */ }; -const struct iw_handler_def hostap_iw_handler_def = +static const struct iw_handler_def hostap_iw_handler_def = { .num_standard = sizeof(prism2_handler) / sizeof(iw_handler), .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler), diff --git a/trunk/drivers/net/wireless/hostap/hostap_main.c b/trunk/drivers/net/wireless/hostap/hostap_main.c index 8dd4c4446a64..3d2ea61033be 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_main.c +++ b/trunk/drivers/net/wireless/hostap/hostap_main.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -48,6 +47,57 @@ MODULE_VERSION(PRISM2_VERSION); #define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */)) +/* hostap.c */ +static int prism2_wds_add(local_info_t *local, u8 *remote_addr, + int rtnl_locked); +static int prism2_wds_del(local_info_t *local, u8 *remote_addr, + int rtnl_locked, int do_not_remove); + +/* hostap_ap.c */ +static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], + struct iw_quality qual[], int buf_size, + int aplist); +static int prism2_ap_translate_scan(struct net_device *dev, char *buffer); +static int prism2_hostapd(struct ap_data *ap, + struct prism2_hostapd_param *param); +static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, + struct ieee80211_crypt_data ***crypt); +static void ap_control_kickall(struct ap_data *ap); +#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT +static int ap_control_add_mac(struct mac_restrictions *mac_restrictions, + u8 *mac); +static int ap_control_del_mac(struct mac_restrictions *mac_restrictions, + u8 *mac); +static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions); +static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, + u8 *mac); +#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */ + + +static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, + 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; +#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0])) + + +/* 
See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ +/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ +static unsigned char rfc1042_header[] = +{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; +/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ +static unsigned char bridge_tunnel_header[] = +{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; +/* No encapsulation header if EtherType < 0x600 (=length) */ + + +/* FIX: these could be compiled separately and linked together to hostap.o */ +#include "hostap_ap.c" +#include "hostap_info.c" +#include "hostap_ioctl.c" +#include "hostap_proc.c" +#include "hostap_80211_rx.c" +#include "hostap_80211_tx.c" + + struct net_device * hostap_add_interface(struct local_info *local, int type, int rtnl_locked, const char *prefix, @@ -146,8 +196,8 @@ static inline int prism2_wds_special_addr(u8 *addr) } -int prism2_wds_add(local_info_t *local, u8 *remote_addr, - int rtnl_locked) +static int prism2_wds_add(local_info_t *local, u8 *remote_addr, + int rtnl_locked) { struct net_device *dev; struct list_head *ptr; @@ -208,8 +258,8 @@ int prism2_wds_add(local_info_t *local, u8 *remote_addr, } -int prism2_wds_del(local_info_t *local, u8 *remote_addr, - int rtnl_locked, int do_not_remove) +static int prism2_wds_del(local_info_t *local, u8 *remote_addr, + int rtnl_locked, int do_not_remove) { unsigned long flags; struct list_head *ptr; diff --git a/trunk/drivers/net/wireless/hostap/hostap_proc.c b/trunk/drivers/net/wireless/hostap/hostap_proc.c index d1d8ce022e63..a0a4cbd4937a 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_proc.c +++ b/trunk/drivers/net/wireless/hostap/hostap_proc.c @@ -1,12 +1,5 @@ /* /proc routines for Host AP driver */ -#include -#include -#include - -#include "hostap_wlan.h" -#include "hostap.h" - #define PROC_LIMIT (PAGE_SIZE - 80) diff --git a/trunk/drivers/net/wireless/hostap/hostap_wlan.h b/trunk/drivers/net/wireless/hostap/hostap_wlan.h index 87a54aa6f4dd..cfd801559492 100644 --- a/trunk/drivers/net/wireless/hostap/hostap_wlan.h +++ b/trunk/drivers/net/wireless/hostap/hostap_wlan.h @@ -1,10 +1,6 @@ #ifndef HOSTAP_WLAN_H #define HOSTAP_WLAN_H -#include -#include -#include - #include "hostap_config.h" #include "hostap_common.h" diff --git a/trunk/drivers/net/wireless/ipw2100.c b/trunk/drivers/net/wireless/ipw2100.c index 8bf02763b5c7..7518384f34d9 100644 --- a/trunk/drivers/net/wireless/ipw2100.c +++ b/trunk/drivers/net/wireless/ipw2100.c @@ -5735,6 +5735,70 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev) return &priv->ieee->stats; } +#if WIRELESS_EXT < 18 +/* Support for wpa_supplicant before WE-18, deprecated. 
*/ + +/* following definitions must match definitions in driver_ipw.c */ + +#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30 + +#define IPW2100_CMD_SET_WPA_PARAM 1 +#define IPW2100_CMD_SET_WPA_IE 2 +#define IPW2100_CMD_SET_ENCRYPTION 3 +#define IPW2100_CMD_MLME 4 + +#define IPW2100_PARAM_WPA_ENABLED 1 +#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2 +#define IPW2100_PARAM_DROP_UNENCRYPTED 3 +#define IPW2100_PARAM_PRIVACY_INVOKED 4 +#define IPW2100_PARAM_AUTH_ALGS 5 +#define IPW2100_PARAM_IEEE_802_1X 6 + +#define IPW2100_MLME_STA_DEAUTH 1 +#define IPW2100_MLME_STA_DISASSOC 2 + +#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2 +#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3 +#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4 +#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5 +#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6 +#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7 + +#define IPW2100_CRYPT_ALG_NAME_LEN 16 + +struct ipw2100_param { + u32 cmd; + u8 sta_addr[ETH_ALEN]; + union { + struct { + u8 name; + u32 value; + } wpa_param; + struct { + u32 len; + u8 reserved[32]; + u8 data[0]; + } wpa_ie; + struct { + u32 command; + u32 reason_code; + } mlme; + struct { + u8 alg[IPW2100_CRYPT_ALG_NAME_LEN]; + u8 set_tx; + u32 err; + u8 idx; + u8 seq[8]; /* sequence counter (set: RX, get: TX) */ + u16 key_len; + u8 key[0]; + } crypt; + + } u; +}; + +/* end of driver_ipw.c code */ +#endif /* WIRELESS_EXT < 18 */ + static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) { /* This is called when wpa_supplicant loads and closes the driver @@ -5743,6 +5807,11 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) return 0; } +#if WIRELESS_EXT < 18 +#define IW_AUTH_ALG_OPEN_SYSTEM 0x1 +#define IW_AUTH_ALG_SHARED_KEY 0x2 +#endif + static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) { @@ -5786,6 +5855,360 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, ipw2100_set_wpa_ie(priv, &frame, 0); } +#if WIRELESS_EXT < 18 +static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value) +{ + struct ipw2100_priv *priv = ieee80211_priv(dev); + struct ieee80211_crypt_data *crypt; + unsigned long flags; + int ret = 0; + + switch (name) { + case IPW2100_PARAM_WPA_ENABLED: + ret = ipw2100_wpa_enable(priv, value); + break; + + case IPW2100_PARAM_TKIP_COUNTERMEASURES: + crypt = priv->ieee->crypt[priv->ieee->tx_keyidx]; + if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) + break; + + flags = crypt->ops->get_flags(crypt->priv); + + if (value) + flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; + else + flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; + + crypt->ops->set_flags(flags, crypt->priv); + + break; + + case IPW2100_PARAM_DROP_UNENCRYPTED:{ + /* See IW_AUTH_DROP_UNENCRYPTED handling for details */ + struct ieee80211_security sec = { + .flags = SEC_ENABLED, + .enabled = value, + }; + priv->ieee->drop_unencrypted = value; + /* We only change SEC_LEVEL for open mode. Others + * are set by ipw_wpa_set_encryption. 
+ */ + if (!value) { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_0; + } else { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; + } + if (priv->ieee->set_security) + priv->ieee->set_security(priv->ieee->dev, &sec); + break; + } + + case IPW2100_PARAM_PRIVACY_INVOKED: + priv->ieee->privacy_invoked = value; + break; + + case IPW2100_PARAM_AUTH_ALGS: + ret = ipw2100_wpa_set_auth_algs(priv, value); + break; + + case IPW2100_PARAM_IEEE_802_1X: + priv->ieee->ieee802_1x = value; + break; + + default: + printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n", + dev->name, name); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason) +{ + + struct ipw2100_priv *priv = ieee80211_priv(dev); + int ret = 0; + + switch (command) { + case IPW2100_MLME_STA_DEAUTH: + // silently ignore + break; + + case IPW2100_MLME_STA_DISASSOC: + ipw2100_disassociate_bssid(priv); + break; + + default: + printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n", + dev->name, command); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int ipw2100_wpa_set_wpa_ie(struct net_device *dev, + struct ipw2100_param *param, int plen) +{ + + struct ipw2100_priv *priv = ieee80211_priv(dev); + struct ieee80211_device *ieee = priv->ieee; + u8 *buf; + + if (!ieee->wpa_enabled) + return -EOPNOTSUPP; + + if (param->u.wpa_ie.len > MAX_WPA_IE_LEN || + (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL)) + return -EINVAL; + + if (param->u.wpa_ie.len) { + buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len); + + kfree(ieee->wpa_ie); + ieee->wpa_ie = buf; + ieee->wpa_ie_len = param->u.wpa_ie.len; + + } else { + kfree(ieee->wpa_ie); + ieee->wpa_ie = NULL; + ieee->wpa_ie_len = 0; + } + + ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); + + return 0; +} + +/* implementation borrowed from hostap driver */ + +static int ipw2100_wpa_set_encryption(struct net_device *dev, + struct ipw2100_param *param, + int param_len) +{ + int ret = 0; + struct ipw2100_priv *priv = ieee80211_priv(dev); + struct ieee80211_device *ieee = priv->ieee; + struct ieee80211_crypto_ops *ops; + struct ieee80211_crypt_data **crypt; + + struct ieee80211_security sec = { + .flags = 0, + }; + + param->u.crypt.err = 0; + param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0'; + + if (param_len != + (int)((char *)param->u.crypt.key - (char *)param) + + param->u.crypt.key_len) { + IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len, + param->u.crypt.key_len); + return -EINVAL; + } + if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && + param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && + param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { + if (param->u.crypt.idx >= WEP_KEYS) + return -EINVAL; + crypt = &ieee->crypt[param->u.crypt.idx]; + } else { + return -EINVAL; + } + + sec.flags |= SEC_ENABLED | SEC_ENCRYPT; + if (strcmp(param->u.crypt.alg, "none") == 0) { + if (crypt) { + sec.enabled = 0; + sec.encrypt = 0; + sec.level = SEC_LEVEL_0; + sec.flags |= SEC_LEVEL; + ieee80211_crypt_delayed_deinit(ieee, crypt); + } + goto done; + } + sec.enabled = 1; + sec.encrypt = 1; + + ops = ieee80211_get_crypto_ops(param->u.crypt.alg); + if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { + request_module("ieee80211_crypt_wep"); + ops = ieee80211_get_crypto_ops(param->u.crypt.alg); + } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { + 
request_module("ieee80211_crypt_tkip"); + ops = ieee80211_get_crypto_ops(param->u.crypt.alg); + } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { + request_module("ieee80211_crypt_ccmp"); + ops = ieee80211_get_crypto_ops(param->u.crypt.alg); + } + if (ops == NULL) { + IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n", + dev->name, param->u.crypt.alg); + param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG; + ret = -EINVAL; + goto done; + } + + if (*crypt == NULL || (*crypt)->ops != ops) { + struct ieee80211_crypt_data *new_crypt; + + ieee80211_crypt_delayed_deinit(ieee, crypt); + + new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL); + if (new_crypt == NULL) { + ret = -ENOMEM; + goto done; + } + new_crypt->ops = ops; + if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + new_crypt->priv = + new_crypt->ops->init(param->u.crypt.idx); + + if (new_crypt->priv == NULL) { + kfree(new_crypt); + param->u.crypt.err = + IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED; + ret = -EINVAL; + goto done; + } + + *crypt = new_crypt; + } + + if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key && + (*crypt)->ops->set_key(param->u.crypt.key, + param->u.crypt.key_len, param->u.crypt.seq, + (*crypt)->priv) < 0) { + IPW_DEBUG_INFO("%s: key setting failed\n", dev->name); + param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED; + ret = -EINVAL; + goto done; + } + + if (param->u.crypt.set_tx) { + ieee->tx_keyidx = param->u.crypt.idx; + sec.active_key = param->u.crypt.idx; + sec.flags |= SEC_ACTIVE_KEY; + } + + if (ops->name != NULL) { + + if (strcmp(ops->name, "WEP") == 0) { + memcpy(sec.keys[param->u.crypt.idx], + param->u.crypt.key, param->u.crypt.key_len); + sec.key_sizes[param->u.crypt.idx] = + param->u.crypt.key_len; + sec.flags |= (1 << param->u.crypt.idx); + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; + } else if (strcmp(ops->name, "TKIP") == 0) { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_2; + } else if (strcmp(ops->name, "CCMP") == 0) { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_3; + } + } + done: + if (ieee->set_security) + ieee->set_security(ieee->dev, &sec); + + /* Do not reset port if card is in Managed mode since resetting will + * generate new IEEE 802.11 authentication which may end up in looping + * with IEEE 802.1X. If your hardware requires a reset after WEP + * configuration (for example... Prism2), implement the reset_port in + * the callbacks structures used to initialize the 802.11 stack. 
*/ + if (ieee->reset_on_keychange && + ieee->iw_mode != IW_MODE_INFRA && + ieee->reset_port && ieee->reset_port(dev)) { + IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name); + param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED; + return -EINVAL; + } + + return ret; +} + +static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p) +{ + + struct ipw2100_param *param; + int ret = 0; + + IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length); + + if (p->length < sizeof(struct ipw2100_param) || !p->pointer) + return -EINVAL; + + param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL); + if (param == NULL) + return -ENOMEM; + + if (copy_from_user(param, p->pointer, p->length)) { + kfree(param); + return -EFAULT; + } + + switch (param->cmd) { + + case IPW2100_CMD_SET_WPA_PARAM: + ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name, + param->u.wpa_param.value); + break; + + case IPW2100_CMD_SET_WPA_IE: + ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length); + break; + + case IPW2100_CMD_SET_ENCRYPTION: + ret = ipw2100_wpa_set_encryption(dev, param, p->length); + break; + + case IPW2100_CMD_MLME: + ret = ipw2100_wpa_mlme(dev, param->u.mlme.command, + param->u.mlme.reason_code); + break; + + default: + printk(KERN_ERR DRV_NAME + ": %s: Unknown WPA supplicant request: %d\n", dev->name, + param->cmd); + ret = -EOPNOTSUPP; + + } + + if (ret == 0 && copy_to_user(p->pointer, param, p->length)) + ret = -EFAULT; + + kfree(param); + return ret; +} + +static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct iwreq *wrq = (struct iwreq *)rq; + int ret = -1; + switch (cmd) { + case IPW2100_IOCTL_WPA_SUPPLICANT: + ret = ipw2100_wpa_supplicant(dev, &wrq->u.data); + return ret; + + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} +#endif /* WIRELESS_EXT < 18 */ + static void ipw_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -5914,6 +6337,9 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, dev->open = ipw2100_open; dev->stop = ipw2100_close; dev->init = ipw2100_net_init; +#if WIRELESS_EXT < 18 + dev->do_ioctl = ipw2100_ioctl; +#endif dev->get_stats = ipw2100_stats; dev->ethtool_ops = &ipw2100_ethtool_ops; dev->tx_timeout = ipw2100_tx_timeout; @@ -7429,6 +7855,7 @@ static int ipw2100_wx_get_power(struct net_device *dev, return 0; } +#if WIRELESS_EXT > 17 /* * WE-18 WPA support */ @@ -7690,6 +8117,7 @@ static int ipw2100_wx_set_mlme(struct net_device *dev, } return 0; } +#endif /* WIRELESS_EXT > 17 */ /* * @@ -7922,7 +8350,11 @@ static iw_handler ipw2100_wx_handlers[] = { NULL, /* SIOCWIWTHRSPY */ ipw2100_wx_set_wap, /* SIOCSIWAP */ ipw2100_wx_get_wap, /* SIOCGIWAP */ +#if WIRELESS_EXT > 17 ipw2100_wx_set_mlme, /* SIOCSIWMLME */ +#else + NULL, /* -- hole -- */ +#endif NULL, /* SIOCGIWAPLIST -- deprecated */ ipw2100_wx_set_scan, /* SIOCSIWSCAN */ ipw2100_wx_get_scan, /* SIOCGIWSCAN */ @@ -7946,6 +8378,7 @@ static iw_handler ipw2100_wx_handlers[] = { ipw2100_wx_get_encode, /* SIOCGIWENCODE */ ipw2100_wx_set_power, /* SIOCSIWPOWER */ ipw2100_wx_get_power, /* SIOCGIWPOWER */ +#if WIRELESS_EXT > 17 NULL, /* -- hole -- */ NULL, /* -- hole -- */ ipw2100_wx_set_genie, /* SIOCSIWGENIE */ @@ -7955,6 +8388,7 @@ static iw_handler ipw2100_wx_handlers[] = { ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */ ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */ NULL, /* SIOCSIWPMKSA */ +#endif }; #define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV diff --git a/trunk/drivers/net/wireless/ipw2200.c 
b/trunk/drivers/net/wireless/ipw2200.c index 4c28e332ecc3..819be2b6b7df 100644 --- a/trunk/drivers/net/wireless/ipw2200.c +++ b/trunk/drivers/net/wireless/ipw2200.c @@ -8936,12 +8936,14 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, IPW_DEBUG_HC("starting request direct scan!\n"); if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { - /* We should not sleep here; otherwise we will block most - * of the system (for instance, we hold rtnl_lock when we - * get here). - */ - err = -EAGAIN; - goto done; + err = wait_event_interruptible(priv->wait_state, + !(priv-> + status & (STATUS_SCANNING | + STATUS_SCAN_ABORTING))); + if (err) { + IPW_DEBUG_HC("aborting direct scan"); + goto done; + } } memset(&scan, 0, sizeof(scan)); diff --git a/trunk/drivers/net/wireless/prism54/isl_ioctl.c b/trunk/drivers/net/wireless/prism54/isl_ioctl.c index c5cd61c7f927..135a156db25d 100644 --- a/trunk/drivers/net/wireless/prism54/isl_ioctl.c +++ b/trunk/drivers/net/wireless/prism54/isl_ioctl.c @@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, if (essid->length) { dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ /* if it is to big, trunk it */ - dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length); + dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1); } else { dwrq->flags = 0; dwrq->length = 0; diff --git a/trunk/drivers/net/wireless/prism54/islpci_eth.c b/trunk/drivers/net/wireless/prism54/islpci_eth.c index a8261d8454dd..33d64d2ee53f 100644 --- a/trunk/drivers/net/wireless/prism54/islpci_eth.c +++ b/trunk/drivers/net/wireless/prism54/islpci_eth.c @@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) #endif newskb->dev = skb->dev; - dev_kfree_skb_irq(skb); + dev_kfree_skb(skb); skb = newskb; } } diff --git a/trunk/drivers/net/wireless/ray_cs.c b/trunk/drivers/net/wireless/ray_cs.c index 7880d8c31aad..319180ca7e71 100644 --- a/trunk/drivers/net/wireless/ray_cs.c +++ b/trunk/drivers/net/wireless/ray_cs.c @@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev, extra[IW_ESSID_MAX_SIZE] = '\0'; /* Push it out ! 
*/ - dwrq->length = strlen(extra); + dwrq->length = strlen(extra) + 1; dwrq->flags = 1; /* active */ return 0; diff --git a/trunk/drivers/net/wireless/wavelan_cs.c b/trunk/drivers/net/wireless/wavelan_cs.c index cf373625fc70..7e2039f52c49 100644 --- a/trunk/drivers/net/wireless/wavelan_cs.c +++ b/trunk/drivers/net/wireless/wavelan_cs.c @@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev, extra[IW_ESSID_MAX_SIZE] = '\0'; /* Set the length */ - wrqu->data.length = strlen(extra); + wrqu->data.length = strlen(extra) + 1; return 0; } diff --git a/trunk/drivers/scsi/ahci.c b/trunk/drivers/scsi/ahci.c index d113290b5fc0..19bd346951dd 100644 --- a/trunk/drivers/scsi/ahci.c +++ b/trunk/drivers/scsi/ahci.c @@ -276,6 +276,16 @@ static const struct pci_device_id ahci_pci_tbl[] = { board_ahci }, /* ESB2 */ { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_ahci }, /* ICH7-M DH */ + { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_ahci }, /* ICH8 */ + { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_ahci }, /* ICH8 */ + { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_ahci }, /* ICH8 */ + { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_ahci }, /* ICH8M */ + { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_ahci }, /* ICH8M */ { } /* terminate list */ }; diff --git a/trunk/drivers/video/sbuslib.c b/trunk/drivers/video/sbuslib.c index a4d7cc51ce0b..55e6e2d60d3a 100644 --- a/trunk/drivers/video/sbuslib.c +++ b/trunk/drivers/video/sbuslib.c @@ -199,7 +199,8 @@ struct fbcmap32 { #define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32) #define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32) -static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg) +static int fbiogetputcmap(struct file *file, struct fb_info *info, + unsigned int cmd, unsigned long arg) { struct fbcmap32 __user *argp = (void __user *)arg; struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p)); @@ -235,7 +236,8 @@ struct fbcursor32 { #define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32) #define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32) -static int fbiogscursor(struct fb_info *info, unsigned long arg) +static int fbiogscursor(struct file *file, struct fb_info *info, + unsigned long arg) { struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p)); struct fbcursor32 __user *argp = (void __user *)arg; @@ -261,7 +263,8 @@ static int fbiogscursor(struct fb_info *info, unsigned long arg) return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p); } -int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) +long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) { switch (cmd) { case FBIOGTYPE: diff --git a/trunk/drivers/video/sbuslib.h b/trunk/drivers/video/sbuslib.h index 492828c3fe8f..f753939013ed 100644 --- a/trunk/drivers/video/sbuslib.h +++ b/trunk/drivers/video/sbuslib.h @@ -20,7 +20,7 @@ extern int sbusfb_mmap_helper(struct sbus_mmap_map *map, int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, struct fb_info *info, int type, int fb_depth, unsigned long fb_size); -int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, +long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg); #endif /* _SBUSLIB_H */ diff --git a/trunk/include/asm-powerpc/lppaca.h b/trunk/include/asm-powerpc/lppaca.h index 4dc514aabfe7..cd9f11f1ef14 100644 --- a/trunk/include/asm-powerpc/lppaca.h 
+++ b/trunk/include/asm-powerpc/lppaca.h @@ -31,7 +31,7 @@ /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k * alignment is sufficient to prevent this */ -struct lppaca { +struct __attribute__((__aligned__(0x400))) lppaca { //============================================================================= // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data // NOTE: The xDynXyz fields are fields that will be dynamically changed by @@ -129,7 +129,7 @@ struct lppaca { // CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data //============================================================================= u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF -} __attribute__((__aligned__(0x400))); +}; extern struct lppaca lppaca[]; diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index a5363324cf95..323924edb26a 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -228,7 +228,6 @@ extern void dump_stack(void); ntohs((addr).s6_addr16[6]), \ ntohs((addr).s6_addr16[7]) #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" -#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x" #if defined(__LITTLE_ENDIAN) #define HIPQUAD(addr) \ diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_ah.h b/trunk/include/linux/netfilter_ipv6/ip6t_ah.h index 8531879eb464..c4f0793a0a98 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_ah.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_ah.h @@ -18,4 +18,13 @@ struct ip6t_ah #define IP6T_AH_INV_LEN 0x02 /* Invert the sense of length. */ #define IP6T_AH_INV_MASK 0x03 /* All possible flags. */ +#define MASK_HOPOPTS 128 +#define MASK_DSTOPTS 64 +#define MASK_ROUTING 32 +#define MASK_FRAGMENT 16 +#define MASK_AH 8 +#define MASK_ESP 4 +#define MASK_NONE 2 +#define MASK_PROTO 1 + #endif /*_IP6T_AH_H*/ diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_esp.h b/trunk/include/linux/netfilter_ipv6/ip6t_esp.h index a91b6abc8079..01142b98a231 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_esp.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_esp.h @@ -7,6 +7,15 @@ struct ip6t_esp u_int8_t invflags; /* Inverse flags */ }; +#define MASK_HOPOPTS 128 +#define MASK_DSTOPTS 64 +#define MASK_ROUTING 32 +#define MASK_FRAGMENT 16 +#define MASK_AH 8 +#define MASK_ESP 4 +#define MASK_NONE 2 +#define MASK_PROTO 1 + /* Values for "invflags" field in struct ip6t_esp. */ #define IP6T_ESP_INV_SPI 0x01 /* Invert the sense of spi. */ #define IP6T_ESP_INV_MASK 0x01 /* All possible flags. */ diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_frag.h b/trunk/include/linux/netfilter_ipv6/ip6t_frag.h index 66070a0d6dfc..449a57eca7dd 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_frag.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_frag.h @@ -21,4 +21,13 @@ struct ip6t_frag #define IP6T_FRAG_INV_LEN 0x02 /* Invert the sense of length. */ #define IP6T_FRAG_INV_MASK 0x03 /* All possible flags. */ +#define MASK_HOPOPTS 128 +#define MASK_DSTOPTS 64 +#define MASK_ROUTING 32 +#define MASK_FRAGMENT 16 +#define MASK_AH 8 +#define MASK_ESP 4 +#define MASK_NONE 2 +#define MASK_PROTO 1 + #endif /*_IP6T_FRAG_H*/ diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_opts.h b/trunk/include/linux/netfilter_ipv6/ip6t_opts.h index a07e36380ae8..e259b6275bd2 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_opts.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_opts.h @@ -20,4 +20,13 @@ struct ip6t_opts #define IP6T_OPTS_INV_LEN 0x01 /* Invert the sense of length. */ #define IP6T_OPTS_INV_MASK 0x01 /* All possible flags. 
*/ +#define MASK_HOPOPTS 128 +#define MASK_DSTOPTS 64 +#define MASK_ROUTING 32 +#define MASK_FRAGMENT 16 +#define MASK_AH 8 +#define MASK_ESP 4 +#define MASK_NONE 2 +#define MASK_PROTO 1 + #endif /*_IP6T_OPTS_H*/ diff --git a/trunk/include/linux/netfilter_ipv6/ip6t_rt.h b/trunk/include/linux/netfilter_ipv6/ip6t_rt.h index 52156023e8db..f1070fbf2757 100644 --- a/trunk/include/linux/netfilter_ipv6/ip6t_rt.h +++ b/trunk/include/linux/netfilter_ipv6/ip6t_rt.h @@ -30,4 +30,13 @@ struct ip6t_rt #define IP6T_RT_INV_LEN 0x04 /* Invert the sense of length. */ #define IP6T_RT_INV_MASK 0x07 /* All possible flags. */ +#define MASK_HOPOPTS 128 +#define MASK_DSTOPTS 64 +#define MASK_ROUTING 32 +#define MASK_FRAGMENT 16 +#define MASK_AH 8 +#define MASK_ESP 4 +#define MASK_NONE 2 +#define MASK_PROTO 1 + #endif /*_IP6T_RT_H*/ diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h index ad7cc22bd424..e5fd66c5650b 100644 --- a/trunk/include/linux/skbuff.h +++ b/trunk/include/linux/skbuff.h @@ -926,7 +926,7 @@ static inline int skb_tailroom(const struct sk_buff *skb) * Increase the headroom of an empty &sk_buff by reducing the tail * room. This is only allowed for an empty buffer. */ -static inline void skb_reserve(struct sk_buff *skb, int len) +static inline void skb_reserve(struct sk_buff *skb, unsigned int len) { skb->data += len; skb->tail += len; diff --git a/trunk/include/net/ieee80211_crypt.h b/trunk/include/net/ieee80211_crypt.h index cd82c3e998e4..03b766afdc39 100644 --- a/trunk/include/net/ieee80211_crypt.h +++ b/trunk/include/net/ieee80211_crypt.h @@ -25,7 +25,6 @@ #include #include -#include #include enum { diff --git a/trunk/include/net/iw_handler.h b/trunk/include/net/iw_handler.h index a2c5e0b88422..d67c8393a343 100644 --- a/trunk/include/net/iw_handler.h +++ b/trunk/include/net/iw_handler.h @@ -327,7 +327,7 @@ struct iw_handler_def __u16 num_private_args; /* Array of handlers for standard ioctls - * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT] + * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME] */ const iw_handler * standard; diff --git a/trunk/net/bridge/netfilter/ebt_ip.c b/trunk/net/bridge/netfilter/ebt_ip.c index dc5d0b2427cf..f158fe67dd60 100644 --- a/trunk/net/bridge/netfilter/ebt_ip.c +++ b/trunk/net/bridge/netfilter/ebt_ip.c @@ -92,9 +92,7 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask, if (info->invflags & EBT_IP_PROTO) return -EINVAL; if (info->protocol != IPPROTO_TCP && - info->protocol != IPPROTO_UDP && - info->protocol != IPPROTO_SCTP && - info->protocol != IPPROTO_DCCP) + info->protocol != IPPROTO_UDP) return -EINVAL; } if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1]) diff --git a/trunk/net/bridge/netfilter/ebt_log.c b/trunk/net/bridge/netfilter/ebt_log.c index 0128fbbe2328..a29c1232c420 100644 --- a/trunk/net/bridge/netfilter/ebt_log.c +++ b/trunk/net/bridge/netfilter/ebt_log.c @@ -95,9 +95,7 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum, "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr), NIPQUAD(ih->daddr), ih->tos, ih->protocol); if (ih->protocol == IPPROTO_TCP || - ih->protocol == IPPROTO_UDP || - ih->protocol == IPPROTO_SCTP || - ih->protocol == IPPROTO_DCCP) { + ih->protocol == IPPROTO_UDP) { struct tcpudphdr _ports, *pptr; pptr = skb_header_pointer(skb, ih->ihl*4, diff --git a/trunk/net/core/filter.c b/trunk/net/core/filter.c index 9540946a48f3..a52665f75224 100644 --- a/trunk/net/core/filter.c +++ b/trunk/net/core/filter.c @@ -74,6 +74,7 @@ static inline void 
*load_pointer(struct sk_buff *skb, int k, * filtering, filter is the array of filter instructions, and * len is the number of filter blocks in the array. */ + unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) { struct sock_filter *fentry; /* We walk down these */ @@ -174,7 +175,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int continue; case BPF_LD|BPF_W|BPF_ABS: k = fentry->k; -load_w: + load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = ntohl(*(u32 *)ptr); @@ -183,7 +184,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int break; case BPF_LD|BPF_H|BPF_ABS: k = fentry->k; -load_h: + load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = ntohs(*(u16 *)ptr); @@ -373,7 +374,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) case BPF_JMP|BPF_JSET|BPF_K: case BPF_JMP|BPF_JSET|BPF_X: /* for conditionals both must be safe */ - if (pc + ftest->jt + 1 >= flen || + if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; break; @@ -383,7 +384,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) } } - return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; + return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; } /** @@ -403,8 +404,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) int err; /* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) - return -EINVAL; + if (fprog->filter == NULL) + return -EINVAL; fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); if (!fp) diff --git a/trunk/net/core/netpoll.c b/trunk/net/core/netpoll.c index ea51f8d02eb8..281a632fa6a6 100644 --- a/trunk/net/core/netpoll.c +++ b/trunk/net/core/netpoll.c @@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np) } } - if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr) + if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr) memcpy(np->local_mac, ndev->dev_addr, 6); if (!np->local_ip) { diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c index 3827f881f429..39063122fbb7 100644 --- a/trunk/net/core/pktgen.c +++ b/trunk/net/core/pktgen.c @@ -139,7 +139,6 @@ #include #include #include -#include #include #include #include @@ -282,8 +281,8 @@ struct pktgen_dev { __u32 src_mac_count; /* How many MACs to iterate through */ __u32 dst_mac_count; /* How many MACs to iterate through */ - unsigned char dst_mac[ETH_ALEN]; - unsigned char src_mac[ETH_ALEN]; + unsigned char dst_mac[6]; + unsigned char src_mac[6]; __u32 cur_dst_mac_offset; __u32 cur_src_mac_offset; @@ -595,9 +594,16 @@ static int pktgen_if_show(struct seq_file *seq, void *v) seq_puts(seq, " src_mac: "); - if (is_zero_ether_addr(pkt_dev->src_mac)) + if ((pkt_dev->src_mac[0] == 0) && + (pkt_dev->src_mac[1] == 0) && + (pkt_dev->src_mac[2] == 0) && + (pkt_dev->src_mac[3] == 0) && + (pkt_dev->src_mac[4] == 0) && + (pkt_dev->src_mac[5] == 0)) + for (i = 0; i < 6; i++) seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); + else for (i = 0; i < 6; i++) seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? 
" " : ":"); @@ -1183,9 +1189,9 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer } if (!strcmp(name, "dst_mac")) { char *v = valstr; - unsigned char old_dmac[ETH_ALEN]; + unsigned char old_dmac[6]; unsigned char *m = pkt_dev->dst_mac; - memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); + memcpy(old_dmac, pkt_dev->dst_mac, 6); len = strn_len(&user_buffer[i], sizeof(valstr) - 1); if (len < 0) { return len; } @@ -1214,8 +1220,8 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer } /* Set up Dest MAC */ - if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) - memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); + if (memcmp(old_dmac, pkt_dev->dst_mac, 6) != 0) + memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6); sprintf(pg_result, "OK: dstmac"); return count; @@ -1554,11 +1560,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) /* Default to the interface's mac if not explicitly set. */ - if (is_zero_ether_addr(pkt_dev->src_mac)) - memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN); + if ((pkt_dev->src_mac[0] == 0) && + (pkt_dev->src_mac[1] == 0) && + (pkt_dev->src_mac[2] == 0) && + (pkt_dev->src_mac[3] == 0) && + (pkt_dev->src_mac[4] == 0) && + (pkt_dev->src_mac[5] == 0)) { + memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, 6); + } /* Set up Dest MAC */ - memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); + memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6); /* Set up pkt size */ pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size; diff --git a/trunk/net/dccp/ackvec.c b/trunk/net/dccp/ackvec.c index 2c77dafbd091..ce9cb77c5c29 100644 --- a/trunk/net/dccp/ackvec.c +++ b/trunk/net/dccp/ackvec.c @@ -144,7 +144,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av, const unsigned char state) { unsigned int gap; - long new_head; + signed long new_head; if (av->dccpav_vec_len + packets > av->dccpav_buf_len) return -ENOBUFS; diff --git a/trunk/net/ipv4/netfilter/Makefile b/trunk/net/ipv4/netfilter/Makefile index e5c5b3202f02..bcefe64b9317 100644 --- a/trunk/net/ipv4/netfilter/Makefile +++ b/trunk/net/ipv4/netfilter/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o # matches +obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o diff --git a/trunk/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/trunk/net/ipv4/netfilter/ip_conntrack_proto_gre.c index 56794797d55b..c777abf16cb7 100644 --- a/trunk/net/ipv4/netfilter/ip_conntrack_proto_gre.c +++ b/trunk/net/ipv4/netfilter/ip_conntrack_proto_gre.c @@ -32,7 +32,6 @@ #include #include #include -#include static DEFINE_RWLOCK(ip_ct_gre_lock); #define ASSERT_READ_LOCK(x) diff --git a/trunk/net/ipv4/netfilter/ipt_policy.c b/trunk/net/ipv4/netfilter/ipt_policy.c index 18ca8258a1c5..709debcc69c9 100644 --- a/trunk/net/ipv4/netfilter/ipt_policy.c +++ b/trunk/net/ipv4/netfilter/ipt_policy.c @@ -95,10 +95,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info) static int match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, - const void *matchinfo, - int offset, - unsigned int protoff, - int *hotdrop) + const void *matchinfo, int offset, int *hotdrop) { const struct ipt_policy_info *info = matchinfo; int ret; @@ -116,7 +113,7 @@ static int match(const struct sk_buff *skb, return ret; } 
-static int checkentry(const char *tablename, const void *ip_void, +static int checkentry(const char *tablename, const struct ipt_ip *ip, void *matchinfo, unsigned int matchsize, unsigned int hook_mask) { diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index f2e82afc15b3..f701a136a6ae 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -240,8 +240,9 @@ static unsigned rt_hash_mask; static int rt_hash_log; static unsigned int rt_hash_rnd; -static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); -#define RT_CACHE_STAT_INC(field) (__get_cpu_var(rt_cache_stat).field++) +static struct rt_cache_stat *rt_cache_stat; +#define RT_CACHE_STAT_INC(field) \ + (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++) static int rt_intern_hash(unsigned hash, struct rtable *rth, struct rtable **res); @@ -400,7 +401,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos) if (!cpu_possible(cpu)) continue; *pos = cpu+1; - return &per_cpu(rt_cache_stat, cpu); + return per_cpu_ptr(rt_cache_stat, cpu); } return NULL; } @@ -413,7 +414,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (!cpu_possible(cpu)) continue; *pos = cpu+1; - return &per_cpu(rt_cache_stat, cpu); + return per_cpu_ptr(rt_cache_stat, cpu); } return NULL; @@ -3159,6 +3160,10 @@ int __init ip_rt_init(void) ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); ip_rt_max_size = (rt_hash_mask + 1) * 16; + rt_cache_stat = alloc_percpu(struct rt_cache_stat); + if (!rt_cache_stat) + return -ENOMEM; + devinet_init(); ip_fib_init(); @@ -3186,6 +3191,7 @@ int __init ip_rt_init(void) if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, proc_net_stat))) { + free_percpu(rt_cache_stat); return -ENOMEM; } rtstat_pde->proc_fops = &rt_cpu_seq_fops; diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index d328d5986143..dfb4f145a139 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -2644,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v) { struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; seq_printf(seq, - NIP6_SEQFMT " %02x %02x %02x %02x %8s\n", + NIP6_FMT " %02x %02x %02x %02x %8s\n", NIP6(ifp->addr), ifp->idev->dev->ifindex, ifp->prefix_len, diff --git a/trunk/net/ipv6/anycast.c b/trunk/net/ipv6/anycast.c index 840a33d33296..72bd08af2dfb 100644 --- a/trunk/net/ipv6/anycast.c +++ b/trunk/net/ipv6/anycast.c @@ -532,7 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v) struct ac6_iter_state *state = ac6_seq_private(seq); seq_printf(seq, - "%-4d %-15s " NIP6_SEQFMT " %5d\n", + "%-4d %-15s " NIP6_FMT " %5d\n", state->dev->ifindex, state->dev->name, NIP6(im->aca_addr), im->aca_users); diff --git a/trunk/net/ipv6/ip6_flowlabel.c b/trunk/net/ipv6/ip6_flowlabel.c index 69cbe8a66d02..4183c8dac7f6 100644 --- a/trunk/net/ipv6/ip6_flowlabel.c +++ b/trunk/net/ipv6/ip6_flowlabel.c @@ -629,7 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl) { while(fl) { seq_printf(seq, - "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_SEQFMT " %-4d\n", + "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n", (unsigned)ntohl(fl->label), fl->share, (unsigned)fl->owner, @@ -645,7 +645,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl) static int ip6fl_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) - seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", + seq_printf(seq, "%-5s %-1s %-6s %-6s 
%-6s %-8s %-39s %s\n", "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); else ip6fl_fl_seq_show(seq, v); diff --git a/trunk/net/ipv6/mcast.c b/trunk/net/ipv6/mcast.c index 6c05c7978bef..0e03eabfb9da 100644 --- a/trunk/net/ipv6/mcast.c +++ b/trunk/net/ipv6/mcast.c @@ -2373,7 +2373,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v) struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); seq_printf(seq, - "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n", + "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n", state->dev->ifindex, state->dev->name, NIP6(im->mca_addr), im->mca_users, im->mca_flags, @@ -2542,12 +2542,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) { seq_printf(seq, "%3s %6s " - "%32s %32s %6s %6s\n", "Idx", + "%39s %39s %6s %6s\n", "Idx", "Device", "Multicast Address", "Source Address", "INC", "EXC"); } else { seq_printf(seq, - "%3d %6.6s " NIP6_SEQFMT " " NIP6_SEQFMT " %6lu %6lu\n", + "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n", state->dev->ifindex, state->dev->name, NIP6(state->im->mca_addr), NIP6(psf->sf_addr), diff --git a/trunk/net/ipv6/netfilter/Makefile b/trunk/net/ipv6/netfilter/Makefile index db6073c94163..663b4749820d 100644 --- a/trunk/net/ipv6/netfilter/Makefile +++ b/trunk/net/ipv6/netfilter/Makefile @@ -4,6 +4,7 @@ # Link order matters here. obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o +obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o diff --git a/trunk/net/ipv6/netfilter/ip6t_dst.c b/trunk/net/ipv6/netfilter/ip6t_dst.c index b4c153a53500..80fe82669ce2 100644 --- a/trunk/net/ipv6/netfilter/ip6t_dst.c +++ b/trunk/net/ipv6/netfilter/ip6t_dst.c @@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo "); #endif /* - * (Type & 0xC0) >> 6 - * 0 -> ignorable - * 1 -> must drop the packet - * 2 -> send ICMP PARM PROB regardless and drop packet - * 3 -> Send ICMP if not a multicast address and drop packet + * (Type & 0xC0) >> 6 + * 0 -> ignorable + * 1 -> must drop the packet + * 2 -> send ICMP PARM PROB regardless and drop packet + * 3 -> Send ICMP if not a multicast address and drop packet * (Type & 0x20) >> 5 - * 0 -> invariant - * 1 -> can change the routing + * 0 -> invariant + * 1 -> can change the routing * (Type & 0x1F) Type - * 0 -> Pad1 (only 1 byte!) - * 1 -> PadN LENGTH info (total length = length + 2) - * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) - * 5 -> RTALERT 2 x x + * 0 -> Pad1 (only 1 byte!) 
+ * 1 -> PadN LENGTH info (total length = length + 2) + * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) + * 5 -> RTALERT 2 x x */ static int @@ -60,16 +60,16 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct ipv6_opt_hdr _optsh, *oh; - const struct ip6t_opts *optinfo = matchinfo; - unsigned int temp; - unsigned int ptr; - unsigned int hdrlen = 0; - unsigned int ret = 0; - u8 _opttype, *tp = NULL; - u8 _optlen, *lp = NULL; - unsigned int optlen; - + struct ipv6_opt_hdr _optsh, *oh; + const struct ip6t_opts *optinfo = matchinfo; + unsigned int temp; + unsigned int ptr; + unsigned int hdrlen = 0; + unsigned int ret = 0; + u8 _opttype, *tp = NULL; + u8 _optlen, *lp = NULL; + unsigned int optlen; + #if HOPBYHOP if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) #else @@ -77,41 +77,42 @@ match(const struct sk_buff *skb, #endif return 0; - oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); - if (oh == NULL) { - *hotdrop = 1; - return 0; - } - - hdrlen = ipv6_optlen(oh); - if (skb->len - ptr < hdrlen) { - /* Packet smaller than it's length field */ - return 0; - } - - DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); - - DEBUGP("len %02X %04X %02X ", - optinfo->hdrlen, hdrlen, - (!(optinfo->flags & IP6T_OPTS_LEN) || - ((optinfo->hdrlen == hdrlen) ^ - !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); - - ret = (oh != NULL) && - (!(optinfo->flags & IP6T_OPTS_LEN) || - ((optinfo->hdrlen == hdrlen) ^ - !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); - - ptr += 2; - hdrlen -= 2; - if (!(optinfo->flags & IP6T_OPTS_OPTS)) { - return ret; + oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); + if (oh == NULL){ + *hotdrop = 1; + return 0; + } + + hdrlen = ipv6_optlen(oh); + if (skb->len - ptr < hdrlen){ + /* Packet smaller than it's length field */ + return 0; + } + + DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); + + DEBUGP("len %02X %04X %02X ", - optinfo->hdrlen, hdrlen, + (!(optinfo->flags & IP6T_OPTS_LEN) || + ((optinfo->hdrlen == hdrlen) ^ + !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); + + ret = (oh != NULL) + && + (!(optinfo->flags & IP6T_OPTS_LEN) || + ((optinfo->hdrlen == hdrlen) ^ + !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); + + ptr += 2; + hdrlen -= 2; + if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){ + return ret; } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { DEBUGP("Not strict - not implemented"); } else { DEBUGP("Strict "); - DEBUGP("#%d ", optinfo->optsnr); - for (temp = 0; temp < optinfo->optsnr; temp++) { + DEBUGP("#%d ",optinfo->optsnr); + for(temp=0; temp<optinfo->optsnr; temp++){ /* type field exists ? */ if (hdrlen < 1) break; @@ -121,10 +122,10 @@ match(const struct sk_buff *skb, break; /* Type check */ - if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) { + if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){ DEBUGP("Tbad %02X %02X\n", *tp, - (optinfo->opts[temp] & 0xFF00) >> 8); + (optinfo->opts[temp] & 0xFF00)>>8); return 0; } else { DEBUGP("Tok "); @@ -168,8 +169,7 @@ match(const struct sk_buff *skb, } if (temp == optinfo->optsnr) return ret; - else - return 0; + else return 0; } return 0; @@ -178,24 +178,25 @@ match(const struct sk_buff *skb, /* Called when user tries to insert an entry of this type. 
*/ static int checkentry(const char *tablename, - const void *info, - void *matchinfo, - unsigned int matchinfosize, - unsigned int hook_mask) + const void *info, + void *matchinfo, + unsigned int matchinfosize, + unsigned int hook_mask) { - const struct ip6t_opts *optsinfo = matchinfo; - - if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { - DEBUGP("ip6t_opts: matchsize %u != %u\n", - matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); - return 0; - } - if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { - DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags); - return 0; - } - - return 1; + const struct ip6t_opts *optsinfo = matchinfo; + + if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { + DEBUGP("ip6t_opts: matchsize %u != %u\n", + matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); + return 0; + } + if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { + DEBUGP("ip6t_opts: unknown flags %X\n", + optsinfo->invflags); + return 0; + } + + return 1; } static struct ip6t_match opts_match = { @@ -211,12 +212,12 @@ static struct ip6t_match opts_match = { static int __init init(void) { - return ip6t_register_match(&opts_match); + return ip6t_register_match(&opts_match); } static void __exit cleanup(void) { - ip6t_unregister_match(&opts_match); + ip6t_unregister_match(&opts_match); } module_init(init); diff --git a/trunk/net/ipv6/netfilter/ip6t_eui64.c b/trunk/net/ipv6/netfilter/ip6t_eui64.c index 27396ac0b9ed..ddf5f571909c 100644 --- a/trunk/net/ipv6/netfilter/ip6t_eui64.c +++ b/trunk/net/ipv6/netfilter/ip6t_eui64.c @@ -27,45 +27,45 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - unsigned char eui64[8]; - int i = 0; - if (!(skb->mac.raw >= skb->head && - (skb->mac.raw + ETH_HLEN) <= skb->data) && - offset != 0) { - *hotdrop = 1; - return 0; - } - - memset(eui64, 0, sizeof(eui64)); - - if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { - if (skb->nh.ipv6h->version == 0x6) { - memcpy(eui64, eth_hdr(skb)->h_source, 3); - memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); - eui64[3] = 0xff; - eui64[4] = 0xfe; - eui64[0] |= 0x02; - - i = 0; - while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i]) - && (i < 8)) - i++; - - if (i == 8) - return 1; - } - } - - return 0; + unsigned char eui64[8]; + int i=0; + + if ( !(skb->mac.raw >= skb->head + && (skb->mac.raw + ETH_HLEN) <= skb->data) + && offset != 0) { + *hotdrop = 1; + return 0; + } + + memset(eui64, 0, sizeof(eui64)); + + if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { + if (skb->nh.ipv6h->version == 0x6) { + memcpy(eui64, eth_hdr(skb)->h_source, 3); + memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); + eui64[3]=0xff; + eui64[4]=0xfe; + eui64[0] |= 0x02; + + i=0; + while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == + eui64[i]) && (i<8)) i++; + + if ( i == 8 ) + return 1; + } + } + + return 0; } static int ip6t_eui64_checkentry(const char *tablename, - const void *ip, - void *matchinfo, - unsigned int matchsize, - unsigned int hook_mask) + const void *ip, + void *matchinfo, + unsigned int matchsize, + unsigned int hook_mask) { if (hook_mask & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) | diff --git a/trunk/net/ipv6/netfilter/ip6t_frag.c b/trunk/net/ipv6/netfilter/ip6t_frag.c index 4c14125a0e26..a9964b946ed5 100644 --- a/trunk/net/ipv6/netfilter/ip6t_frag.c +++ b/trunk/net/ipv6/netfilter/ip6t_frag.c @@ -31,12 +31,12 @@ MODULE_AUTHOR("Andras Kis-Szabo "); static inline int id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) { - int r = 0; - DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 
0x%x", invert ? '!' : ' ', - min, id, max); - r = (id >= min && id <= max) ^ invert; - DEBUGP(" result %s\n", r ? "PASS" : "FAILED"); - return r; + int r=0; + DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', + min,id,max); + r=(id >= min && id <= max) ^ invert; + DEBUGP(" result %s\n",r? "PASS" : "FAILED"); + return r; } static int @@ -48,91 +48,92 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct frag_hdr _frag, *fh; - const struct ip6t_frag *fraginfo = matchinfo; - unsigned int ptr; + struct frag_hdr _frag, *fh; + const struct ip6t_frag *fraginfo = matchinfo; + unsigned int ptr; if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0) return 0; fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); - if (fh == NULL) { + if (fh == NULL){ *hotdrop = 1; return 0; } - DEBUGP("INFO %04X ", fh->frag_off); - DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); - DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6); - DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF)); - DEBUGP("ID %u %08X\n", ntohl(fh->identification), - ntohl(fh->identification)); - - DEBUGP("IPv6 FRAG id %02X ", - (id_match(fraginfo->ids[0], fraginfo->ids[1], - ntohl(fh->identification), - !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))); - DEBUGP("res %02X %02X%04X %02X ", - (fraginfo->flags & IP6T_FRAG_RES), fh->reserved, - ntohs(fh->frag_off) & 0x6, - !((fraginfo->flags & IP6T_FRAG_RES) - && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); - DEBUGP("first %02X %02X %02X ", - (fraginfo->flags & IP6T_FRAG_FST), - ntohs(fh->frag_off) & ~0x7, - !((fraginfo->flags & IP6T_FRAG_FST) - && (ntohs(fh->frag_off) & ~0x7))); - DEBUGP("mf %02X %02X %02X ", - (fraginfo->flags & IP6T_FRAG_MF), - ntohs(fh->frag_off) & IP6_MF, - !((fraginfo->flags & IP6T_FRAG_MF) - && !((ntohs(fh->frag_off) & IP6_MF)))); - DEBUGP("last %02X %02X %02X\n", - (fraginfo->flags & IP6T_FRAG_NMF), - ntohs(fh->frag_off) & IP6_MF, - !((fraginfo->flags & IP6T_FRAG_NMF) - && (ntohs(fh->frag_off) & IP6_MF))); - - return (fh != NULL) - && - (id_match(fraginfo->ids[0], fraginfo->ids[1], - ntohl(fh->identification), - !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))) - && - !((fraginfo->flags & IP6T_FRAG_RES) - && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) - && - !((fraginfo->flags & IP6T_FRAG_FST) - && (ntohs(fh->frag_off) & ~0x7)) - && - !((fraginfo->flags & IP6T_FRAG_MF) - && !(ntohs(fh->frag_off) & IP6_MF)) - && - !((fraginfo->flags & IP6T_FRAG_NMF) - && (ntohs(fh->frag_off) & IP6_MF)); + DEBUGP("INFO %04X ", fh->frag_off); + DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); + DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6); + DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF)); + DEBUGP("ID %u %08X\n", ntohl(fh->identification), + ntohl(fh->identification)); + + DEBUGP("IPv6 FRAG id %02X ", + (id_match(fraginfo->ids[0], fraginfo->ids[1], + ntohl(fh->identification), + !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))); + DEBUGP("res %02X %02X%04X %02X ", + (fraginfo->flags & IP6T_FRAG_RES), fh->reserved, + ntohs(fh->frag_off) & 0x6, + !((fraginfo->flags & IP6T_FRAG_RES) + && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); + DEBUGP("first %02X %02X %02X ", + (fraginfo->flags & IP6T_FRAG_FST), + ntohs(fh->frag_off) & ~0x7, + !((fraginfo->flags & IP6T_FRAG_FST) + && (ntohs(fh->frag_off) & ~0x7))); + DEBUGP("mf %02X %02X %02X ", + (fraginfo->flags & IP6T_FRAG_MF), + ntohs(fh->frag_off) & IP6_MF, + !((fraginfo->flags & IP6T_FRAG_MF) + && !((ntohs(fh->frag_off) & IP6_MF)))); + DEBUGP("last %02X %02X 
%02X\n", + (fraginfo->flags & IP6T_FRAG_NMF), + ntohs(fh->frag_off) & IP6_MF, + !((fraginfo->flags & IP6T_FRAG_NMF) + && (ntohs(fh->frag_off) & IP6_MF))); + + return (fh != NULL) + && + (id_match(fraginfo->ids[0], fraginfo->ids[1], + ntohl(fh->identification), + !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))) + && + !((fraginfo->flags & IP6T_FRAG_RES) + && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) + && + !((fraginfo->flags & IP6T_FRAG_FST) + && (ntohs(fh->frag_off) & ~0x7)) + && + !((fraginfo->flags & IP6T_FRAG_MF) + && !(ntohs(fh->frag_off) & IP6_MF)) + && + !((fraginfo->flags & IP6T_FRAG_NMF) + && (ntohs(fh->frag_off) & IP6_MF)); } /* Called when user tries to insert an entry of this type. */ static int checkentry(const char *tablename, - const void *ip, - void *matchinfo, - unsigned int matchinfosize, - unsigned int hook_mask) + const void *ip, + void *matchinfo, + unsigned int matchinfosize, + unsigned int hook_mask) { - const struct ip6t_frag *fraginfo = matchinfo; - - if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) { - DEBUGP("ip6t_frag: matchsize %u != %u\n", - matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag))); - return 0; - } - if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { - DEBUGP("ip6t_frag: unknown flags %X\n", fraginfo->invflags); - return 0; - } - - return 1; + const struct ip6t_frag *fraginfo = matchinfo; + + if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) { + DEBUGP("ip6t_frag: matchsize %u != %u\n", + matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag))); + return 0; + } + if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { + DEBUGP("ip6t_frag: unknown flags %X\n", + fraginfo->invflags); + return 0; + } + + return 1; } static struct ip6t_match frag_match = { @@ -144,12 +145,12 @@ static struct ip6t_match frag_match = { static int __init init(void) { - return ip6t_register_match(&frag_match); + return ip6t_register_match(&frag_match); } static void __exit cleanup(void) { - ip6t_unregister_match(&frag_match); + ip6t_unregister_match(&frag_match); } module_init(init); diff --git a/trunk/net/ipv6/netfilter/ip6t_hbh.c b/trunk/net/ipv6/netfilter/ip6t_hbh.c index 37a8474a7e0c..ed8ded18bbd4 100644 --- a/trunk/net/ipv6/netfilter/ip6t_hbh.c +++ b/trunk/net/ipv6/netfilter/ip6t_hbh.c @@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo "); #endif /* - * (Type & 0xC0) >> 6 - * 0 -> ignorable - * 1 -> must drop the packet - * 2 -> send ICMP PARM PROB regardless and drop packet - * 3 -> Send ICMP if not a multicast address and drop packet + * (Type & 0xC0) >> 6 + * 0 -> ignorable + * 1 -> must drop the packet + * 2 -> send ICMP PARM PROB regardless and drop packet + * 3 -> Send ICMP if not a multicast address and drop packet * (Type & 0x20) >> 5 - * 0 -> invariant - * 1 -> can change the routing + * 0 -> invariant + * 1 -> can change the routing * (Type & 0x1F) Type - * 0 -> Pad1 (only 1 byte!) - * 1 -> PadN LENGTH info (total length = length + 2) - * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) - * 5 -> RTALERT 2 x x + * 0 -> Pad1 (only 1 byte!) 
+ * 1 -> PadN LENGTH info (total length = length + 2) + * C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k ) + * 5 -> RTALERT 2 x x */ static int @@ -60,16 +60,16 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct ipv6_opt_hdr _optsh, *oh; - const struct ip6t_opts *optinfo = matchinfo; - unsigned int temp; - unsigned int ptr; - unsigned int hdrlen = 0; - unsigned int ret = 0; - u8 _opttype, *tp = NULL; - u8 _optlen, *lp = NULL; - unsigned int optlen; - + struct ipv6_opt_hdr _optsh, *oh; + const struct ip6t_opts *optinfo = matchinfo; + unsigned int temp; + unsigned int ptr; + unsigned int hdrlen = 0; + unsigned int ret = 0; + u8 _opttype, *tp = NULL; + u8 _optlen, *lp = NULL; + unsigned int optlen; + #if HOPBYHOP if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0) #else @@ -77,41 +77,42 @@ match(const struct sk_buff *skb, #endif return 0; - oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); - if (oh == NULL) { - *hotdrop = 1; - return 0; - } - - hdrlen = ipv6_optlen(oh); - if (skb->len - ptr < hdrlen) { - /* Packet smaller than it's length field */ - return 0; - } - - DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); - - DEBUGP("len %02X %04X %02X ", - optinfo->hdrlen, hdrlen, - (!(optinfo->flags & IP6T_OPTS_LEN) || - ((optinfo->hdrlen == hdrlen) ^ - !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); - - ret = (oh != NULL) && - (!(optinfo->flags & IP6T_OPTS_LEN) || - ((optinfo->hdrlen == hdrlen) ^ - !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); - - ptr += 2; - hdrlen -= 2; - if (!(optinfo->flags & IP6T_OPTS_OPTS)) { - return ret; + oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh); + if (oh == NULL){ + *hotdrop = 1; + return 0; + } + + hdrlen = ipv6_optlen(oh); + if (skb->len - ptr < hdrlen){ + /* Packet smaller than it's length field */ + return 0; + } + + DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen); + + DEBUGP("len %02X %04X %02X ", + optinfo->hdrlen, hdrlen, + (!(optinfo->flags & IP6T_OPTS_LEN) || + ((optinfo->hdrlen == hdrlen) ^ + !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); + + ret = (oh != NULL) + && + (!(optinfo->flags & IP6T_OPTS_LEN) || + ((optinfo->hdrlen == hdrlen) ^ + !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); + + ptr += 2; + hdrlen -= 2; + if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){ + return ret; } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { DEBUGP("Not strict - not implemented"); } else { DEBUGP("Strict "); - DEBUGP("#%d ", optinfo->optsnr); - for (temp = 0; temp < optinfo->optsnr; temp++) { + DEBUGP("#%d ",optinfo->optsnr); + for(temp=0; temp<optinfo->optsnr; temp++){ /* type field exists ? */ if (hdrlen < 1) break; @@ -121,10 +122,10 @@ match(const struct sk_buff *skb, break; /* Type check */ - if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) { + if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){ DEBUGP("Tbad %02X %02X\n", *tp, - (optinfo->opts[temp] & 0xFF00) >> 8); + (optinfo->opts[temp] & 0xFF00)>>8); return 0; } else { DEBUGP("Tok "); @@ -168,8 +169,7 @@ match(const struct sk_buff *skb, } if (temp == optinfo->optsnr) return ret; - else - return 0; + else return 0; } return 0; @@ -178,24 +178,25 @@ match(const struct sk_buff *skb, /* Called when user tries to insert an entry of this type. 
*/ static int checkentry(const char *tablename, - const void *entry, - void *matchinfo, - unsigned int matchinfosize, - unsigned int hook_mask) + const void *entry, + void *matchinfo, + unsigned int matchinfosize, + unsigned int hook_mask) { - const struct ip6t_opts *optsinfo = matchinfo; - - if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { - DEBUGP("ip6t_opts: matchsize %u != %u\n", - matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); - return 0; - } - if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { - DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags); - return 0; - } - - return 1; + const struct ip6t_opts *optsinfo = matchinfo; + + if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) { + DEBUGP("ip6t_opts: matchsize %u != %u\n", + matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts))); + return 0; + } + if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) { + DEBUGP("ip6t_opts: unknown flags %X\n", + optsinfo->invflags); + return 0; + } + + return 1; } static struct ip6t_match opts_match = { @@ -211,12 +212,12 @@ static struct ip6t_match opts_match = { static int __init init(void) { - return ip6t_register_match(&opts_match); + return ip6t_register_match(&opts_match); } static void __exit cleanup(void) { - ip6t_unregister_match(&opts_match); + ip6t_unregister_match(&opts_match); } module_init(init); diff --git a/trunk/net/ipv6/netfilter/ip6t_ipv6header.c b/trunk/net/ipv6/netfilter/ip6t_ipv6header.c index 83ad6b272f7e..fda1ceaf5a29 100644 --- a/trunk/net/ipv6/netfilter/ip6t_ipv6header.c +++ b/trunk/net/ipv6/netfilter/ip6t_ipv6header.c @@ -50,20 +50,20 @@ ipv6header_match(const struct sk_buff *skb, len = skb->len - ptr; temp = 0; - while (ip6t_ext_hdr(nexthdr)) { + while (ip6t_ext_hdr(nexthdr)) { struct ipv6_opt_hdr _hdr, *hp; - int hdrlen; + int hdrlen; /* Is there enough space for the next ext header? 
*/ - if (len < (int)sizeof(struct ipv6_opt_hdr)) - return 0; + if (len < (int)sizeof(struct ipv6_opt_hdr)) + return 0; /* No more exthdr -> evaluate */ - if (nexthdr == NEXTHDR_NONE) { + if (nexthdr == NEXTHDR_NONE) { temp |= MASK_NONE; break; } /* ESP -> evaluate */ - if (nexthdr == NEXTHDR_ESP) { + if (nexthdr == NEXTHDR_ESP) { temp |= MASK_ESP; break; } @@ -72,43 +72,43 @@ ipv6header_match(const struct sk_buff *skb, BUG_ON(hp == NULL); /* Calculate the header length */ - if (nexthdr == NEXTHDR_FRAGMENT) { - hdrlen = 8; - } else if (nexthdr == NEXTHDR_AUTH) - hdrlen = (hp->hdrlen + 2) << 2; - else - hdrlen = ipv6_optlen(hp); + if (nexthdr == NEXTHDR_FRAGMENT) { + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) + hdrlen = (hp->hdrlen+2)<<2; + else + hdrlen = ipv6_optlen(hp); /* set the flag */ - switch (nexthdr) { - case NEXTHDR_HOP: - temp |= MASK_HOPOPTS; - break; - case NEXTHDR_ROUTING: - temp |= MASK_ROUTING; - break; - case NEXTHDR_FRAGMENT: - temp |= MASK_FRAGMENT; - break; - case NEXTHDR_AUTH: - temp |= MASK_AH; - break; - case NEXTHDR_DEST: - temp |= MASK_DSTOPTS; - break; - default: - return 0; - break; + switch (nexthdr){ + case NEXTHDR_HOP: + temp |= MASK_HOPOPTS; + break; + case NEXTHDR_ROUTING: + temp |= MASK_ROUTING; + break; + case NEXTHDR_FRAGMENT: + temp |= MASK_FRAGMENT; + break; + case NEXTHDR_AUTH: + temp |= MASK_AH; + break; + case NEXTHDR_DEST: + temp |= MASK_DSTOPTS; + break; + default: + return 0; + break; } - nexthdr = hp->nexthdr; - len -= hdrlen; - ptr += hdrlen; + nexthdr = hp->nexthdr; + len -= hdrlen; + ptr += hdrlen; if (ptr > skb->len) break; - } + } - if ((nexthdr != NEXTHDR_NONE) && (nexthdr != NEXTHDR_ESP)) + if ( (nexthdr != NEXTHDR_NONE ) && (nexthdr != NEXTHDR_ESP) ) temp |= MASK_PROTO; if (info->modeflag) @@ -137,8 +137,8 @@ ipv6header_checkentry(const char *tablename, return 0; /* invflags is 0 or 0xff in hard mode */ - if ((!info->modeflag) && info->invflags != 0x00 && - info->invflags != 0xFF) + if ((!info->modeflag) && info->invflags != 0x00 + && info->invflags != 0xFF) return 0; return 1; @@ -152,7 +152,7 @@ static struct ip6t_match ip6t_ipv6header_match = { .me = THIS_MODULE, }; -static int __init ipv6header_init(void) +static int __init ipv6header_init(void) { return ip6t_register_match(&ip6t_ipv6header_match); } @@ -164,3 +164,4 @@ static void __exit ipv6header_exit(void) module_init(ipv6header_init); module_exit(ipv6header_exit); + diff --git a/trunk/net/ipv6/netfilter/ip6t_owner.c b/trunk/net/ipv6/netfilter/ip6t_owner.c index 8c8a4c7ec934..5409b375b512 100644 --- a/trunk/net/ipv6/netfilter/ip6t_owner.c +++ b/trunk/net/ipv6/netfilter/ip6t_owner.c @@ -36,14 +36,14 @@ match(const struct sk_buff *skb, if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file) return 0; - if (info->match & IP6T_OWNER_UID) { - if ((skb->sk->sk_socket->file->f_uid != info->uid) ^ + if(info->match & IP6T_OWNER_UID) { + if((skb->sk->sk_socket->file->f_uid != info->uid) ^ !!(info->invert & IP6T_OWNER_UID)) return 0; } - if (info->match & IP6T_OWNER_GID) { - if ((skb->sk->sk_socket->file->f_gid != info->gid) ^ + if(info->match & IP6T_OWNER_GID) { + if((skb->sk->sk_socket->file->f_gid != info->gid) ^ !!(info->invert & IP6T_OWNER_GID)) return 0; } @@ -53,23 +53,23 @@ match(const struct sk_buff *skb, static int checkentry(const char *tablename, - const void *ip, - void *matchinfo, - unsigned int matchsize, - unsigned int hook_mask) + const void *ip, + void *matchinfo, + unsigned int matchsize, + unsigned int hook_mask) { const struct ip6t_owner_info *info = 
matchinfo; - if (hook_mask - & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) { - printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n"); - return 0; - } + if (hook_mask + & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) { + printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n"); + return 0; + } if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info))) return 0; - if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) { + if (info->match & (IP6T_OWNER_PID|IP6T_OWNER_SID)) { printk("ipt_owner: pid and sid matching " "not supported anymore\n"); return 0; diff --git a/trunk/net/ipv6/netfilter/ip6t_policy.c b/trunk/net/ipv6/netfilter/ip6t_policy.c index afe1cc4c18a5..13fedad48c1d 100644 --- a/trunk/net/ipv6/netfilter/ip6t_policy.c +++ b/trunk/net/ipv6/netfilter/ip6t_policy.c @@ -118,7 +118,7 @@ static int match(const struct sk_buff *skb, return ret; } -static int checkentry(const char *tablename, const void *ip_void, +static int checkentry(const char *tablename, const struct ip6t_ip6 *ip, void *matchinfo, unsigned int matchsize, unsigned int hook_mask) { diff --git a/trunk/net/ipv6/netfilter/ip6t_rt.c b/trunk/net/ipv6/netfilter/ip6t_rt.c index 8f82476dc89e..8465b4375855 100644 --- a/trunk/net/ipv6/netfilter/ip6t_rt.c +++ b/trunk/net/ipv6/netfilter/ip6t_rt.c @@ -33,12 +33,12 @@ MODULE_AUTHOR("Andras Kis-Szabo "); static inline int segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert) { - int r = 0; - DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x", - invert ? '!' : ' ', min, id, max); - r = (id >= min && id <= max) ^ invert; - DEBUGP(" result %s\n", r ? "PASS" : "FAILED"); - return r; + int r=0; + DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', + min,id,max); + r=(id >= min && id <= max) ^ invert; + DEBUGP(" result %s\n",r? 
"PASS" : "FAILED"); + return r; } static int @@ -50,93 +50,87 @@ match(const struct sk_buff *skb, unsigned int protoff, int *hotdrop) { - struct ipv6_rt_hdr _route, *rh; - const struct ip6t_rt *rtinfo = matchinfo; - unsigned int temp; - unsigned int ptr; - unsigned int hdrlen = 0; - unsigned int ret = 0; - struct in6_addr *ap, _addr; + struct ipv6_rt_hdr _route, *rh; + const struct ip6t_rt *rtinfo = matchinfo; + unsigned int temp; + unsigned int ptr; + unsigned int hdrlen = 0; + unsigned int ret = 0; + struct in6_addr *ap, _addr; if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0) return 0; - rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); - if (rh == NULL) { - *hotdrop = 1; - return 0; - } - - hdrlen = ipv6_optlen(rh); - if (skb->len - ptr < hdrlen) { - /* Pcket smaller than its length field */ - return 0; - } - - DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen); - DEBUGP("TYPE %04X ", rh->type); - DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left); - - DEBUGP("IPv6 RT segsleft %02X ", - (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], - rh->segments_left, - !!(rtinfo->invflags & IP6T_RT_INV_SGS)))); - DEBUGP("type %02X %02X %02X ", - rtinfo->rt_type, rh->type, - (!(rtinfo->flags & IP6T_RT_TYP) || - ((rtinfo->rt_type == rh->type) ^ - !!(rtinfo->invflags & IP6T_RT_INV_TYP)))); - DEBUGP("len %02X %04X %02X ", - rtinfo->hdrlen, hdrlen, - (!(rtinfo->flags & IP6T_RT_LEN) || - ((rtinfo->hdrlen == hdrlen) ^ - !!(rtinfo->invflags & IP6T_RT_INV_LEN)))); - DEBUGP("res %02X %02X %02X ", - (rtinfo->flags & IP6T_RT_RES), - ((struct rt0_hdr *)rh)->reserved, - !((rtinfo->flags & IP6T_RT_RES) && - (((struct rt0_hdr *)rh)->reserved))); - - ret = (rh != NULL) - && - (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], - rh->segments_left, - !!(rtinfo->invflags & IP6T_RT_INV_SGS))) - && - (!(rtinfo->flags & IP6T_RT_LEN) || - ((rtinfo->hdrlen == hdrlen) ^ - !!(rtinfo->invflags & IP6T_RT_INV_LEN))) - && - (!(rtinfo->flags & IP6T_RT_TYP) || - ((rtinfo->rt_type == rh->type) ^ - !!(rtinfo->invflags & IP6T_RT_INV_TYP))); + rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route); + if (rh == NULL){ + *hotdrop = 1; + return 0; + } + + hdrlen = ipv6_optlen(rh); + if (skb->len - ptr < hdrlen){ + /* Pcket smaller than its length field */ + return 0; + } + + DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen); + DEBUGP("TYPE %04X ", rh->type); + DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left); + + DEBUGP("IPv6 RT segsleft %02X ", + (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], + rh->segments_left, + !!(rtinfo->invflags & IP6T_RT_INV_SGS)))); + DEBUGP("type %02X %02X %02X ", + rtinfo->rt_type, rh->type, + (!(rtinfo->flags & IP6T_RT_TYP) || + ((rtinfo->rt_type == rh->type) ^ + !!(rtinfo->invflags & IP6T_RT_INV_TYP)))); + DEBUGP("len %02X %04X %02X ", + rtinfo->hdrlen, hdrlen, + (!(rtinfo->flags & IP6T_RT_LEN) || + ((rtinfo->hdrlen == hdrlen) ^ + !!(rtinfo->invflags & IP6T_RT_INV_LEN)))); + DEBUGP("res %02X %02X %02X ", + (rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->reserved, + !((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->reserved))); + + ret = (rh != NULL) + && + (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], + rh->segments_left, + !!(rtinfo->invflags & IP6T_RT_INV_SGS))) + && + (!(rtinfo->flags & IP6T_RT_LEN) || + ((rtinfo->hdrlen == hdrlen) ^ + !!(rtinfo->invflags & IP6T_RT_INV_LEN))) + && + (!(rtinfo->flags & IP6T_RT_TYP) || + ((rtinfo->rt_type == rh->type) ^ + !!(rtinfo->invflags & IP6T_RT_INV_TYP))); if 
(ret && (rtinfo->flags & IP6T_RT_RES)) { u_int32_t *rp, _reserved; rp = skb_header_pointer(skb, - ptr + offsetof(struct rt0_hdr, - reserved), - sizeof(_reserved), - &_reserved); + ptr + offsetof(struct rt0_hdr, reserved), + sizeof(_reserved), &_reserved); ret = (*rp == 0); } - DEBUGP("#%d ", rtinfo->addrnr); - if (!(rtinfo->flags & IP6T_RT_FST)) { - return ret; + DEBUGP("#%d ",rtinfo->addrnr); + if ( !(rtinfo->flags & IP6T_RT_FST) ){ + return ret; } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) { DEBUGP("Not strict "); - if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { + if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){ DEBUGP("There isn't enough space\n"); return 0; } else { unsigned int i = 0; - DEBUGP("#%d ", rtinfo->addrnr); - for (temp = 0; - temp < (unsigned int)((hdrlen - 8) / 16); - temp++) { + DEBUGP("#%d ",rtinfo->addrnr); + for(temp=0; temp<(unsigned int)((hdrlen-8)/16); temp++){ ap = skb_header_pointer(skb, ptr + sizeof(struct rt0_hdr) @@ -147,26 +141,24 @@ match(const struct sk_buff *skb, BUG_ON(ap == NULL); if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) { - DEBUGP("i=%d temp=%d;\n", i, temp); + DEBUGP("i=%d temp=%d;\n",i,temp); i++; } - if (i == rtinfo->addrnr) - break; + if (i==rtinfo->addrnr) break; } DEBUGP("i=%d #%d\n", i, rtinfo->addrnr); if (i == rtinfo->addrnr) return ret; - else - return 0; + else return 0; } } else { DEBUGP("Strict "); - if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) { + if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){ DEBUGP("There isn't enough space\n"); return 0; } else { - DEBUGP("#%d ", rtinfo->addrnr); - for (temp = 0; temp < rtinfo->addrnr; temp++) { + DEBUGP("#%d ",rtinfo->addrnr); + for(temp=0; temp<rtinfo->addrnr; temp++){ ap = skb_header_pointer(skb, ptr + sizeof(struct rt0_hdr) @@ -179,11 +171,9 @@ match(const struct sk_buff *skb, break; } DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr); - if ((temp == rtinfo->addrnr) && - (temp == (unsigned int)((hdrlen - 8) / 16))) + if ((temp == rtinfo->addrnr) && (temp == (unsigned int)((hdrlen-8)/16))) return ret; - else - return 0; + else return 0; } } @@ -193,31 +183,32 @@ match(const struct sk_buff *skb, /* Called when user tries to insert an entry of this type. 
*/ static int checkentry(const char *tablename, - const void *entry, - void *matchinfo, - unsigned int matchinfosize, - unsigned int hook_mask) + const void *entry, + void *matchinfo, + unsigned int matchinfosize, + unsigned int hook_mask) { - const struct ip6t_rt *rtinfo = matchinfo; - - if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) { - DEBUGP("ip6t_rt: matchsize %u != %u\n", - matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt))); - return 0; - } - if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { - DEBUGP("ip6t_rt: unknown flags %X\n", rtinfo->invflags); - return 0; - } - if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) && - (!(rtinfo->flags & IP6T_RT_TYP) || - (rtinfo->rt_type != 0) || - (rtinfo->invflags & IP6T_RT_INV_TYP))) { - DEBUGP("`--rt-type 0' required before `--rt-0-*'"); - return 0; - } - - return 1; + const struct ip6t_rt *rtinfo = matchinfo; + + if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) { + DEBUGP("ip6t_rt: matchsize %u != %u\n", + matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt))); + return 0; + } + if (rtinfo->invflags & ~IP6T_RT_INV_MASK) { + DEBUGP("ip6t_rt: unknown flags %X\n", + rtinfo->invflags); + return 0; + } + if ( (rtinfo->flags & (IP6T_RT_RES|IP6T_RT_FST_MASK)) && + (!(rtinfo->flags & IP6T_RT_TYP) || + (rtinfo->rt_type != 0) || + (rtinfo->invflags & IP6T_RT_INV_TYP)) ) { + DEBUGP("`--rt-type 0' required before `--rt-0-*'"); + return 0; + } + + return 1; } static struct ip6t_match rt_match = { @@ -229,12 +220,12 @@ static struct ip6t_match rt_match = { static int __init init(void) { - return ip6t_register_match(&rt_match); + return ip6t_register_match(&rt_match); } static void __exit cleanup(void) { - ip6t_unregister_match(&rt_match); + ip6t_unregister_match(&rt_match); } module_init(init); diff --git a/trunk/net/rxrpc/krxtimod.c b/trunk/net/rxrpc/krxtimod.c index 3e7466900bd4..3ac81cdd1211 100644 --- a/trunk/net/rxrpc/krxtimod.c +++ b/trunk/net/rxrpc/krxtimod.c @@ -81,7 +81,7 @@ static int krxtimod(void *arg) for (;;) { unsigned long jif; - long timeout; + signed long timeout; /* deal with the server being asked to die */ if (krxtimod_die) { diff --git a/trunk/net/rxrpc/proc.c b/trunk/net/rxrpc/proc.c index 29975d99d864..3b5ecd8e2401 100644 --- a/trunk/net/rxrpc/proc.c +++ b/trunk/net/rxrpc/proc.c @@ -361,7 +361,7 @@ static void rxrpc_proc_peers_stop(struct seq_file *p, void *v) static int rxrpc_proc_peers_show(struct seq_file *m, void *v) { struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link); - long timeout; + signed long timeout; /* display header on line 1 */ if (v == SEQ_START_TOKEN) { @@ -373,8 +373,8 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v) /* display one peer per line on subsequent lines */ timeout = 0; if (!list_empty(&peer->timeout.link)) - timeout = (long) peer->timeout.timo_jif - - (long) jiffies; + timeout = (signed long) peer->timeout.timo_jif - + (signed long) jiffies; seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n", peer->trans->port, @@ -468,7 +468,7 @@ static void rxrpc_proc_conns_stop(struct seq_file *p, void *v) static int rxrpc_proc_conns_show(struct seq_file *m, void *v) { struct rxrpc_connection *conn; - long timeout; + signed long timeout; conn = list_entry(v, struct rxrpc_connection, proc_link); @@ -484,8 +484,8 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v) /* display one conn per line on subsequent lines */ timeout = 0; if (!list_empty(&conn->timeout.link)) - timeout = (long) conn->timeout.timo_jif - - (long) jiffies; + timeout = (signed long) 
conn->timeout.timo_jif - + (signed long) jiffies; seq_printf(m, "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n", diff --git a/trunk/net/sched/sch_prio.c b/trunk/net/sched/sch_prio.c index 1641db33a994..5b3a3e48ed92 100644 --- a/trunk/net/sched/sch_prio.c +++ b/trunk/net/sched/sch_prio.c @@ -228,13 +228,14 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt) } sch_tree_unlock(sch); - for (i=0; i<q->bands; i++) { - if (q->queues[i] == &noop_qdisc) { + for (i=0; i<=TC_PRIO_MAX; i++) { + int band = q->prio2band[i]; + if (q->queues[band] == &noop_qdisc) { struct Qdisc *child; child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); if (child) { sch_tree_lock(sch); - child = xchg(&q->queues[i], child); + child = xchg(&q->queues[band], child); if (child != &noop_qdisc) qdisc_destroy(child); diff --git a/trunk/net/sched/sch_sfq.c b/trunk/net/sched/sch_sfq.c index 86d8da0cbd02..8734bb7280e3 100644 --- a/trunk/net/sched/sch_sfq.c +++ b/trunk/net/sched/sch_sfq.c @@ -144,8 +144,6 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP || - iph->protocol == IPPROTO_SCTP || - iph->protocol == IPPROTO_DCCP || iph->protocol == IPPROTO_ESP)) h2 ^= *(((u32*)iph) + iph->ihl); break; @@ -157,8 +155,6 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) h2 = iph->saddr.s6_addr32[3]^iph->nexthdr; if (iph->nexthdr == IPPROTO_TCP || iph->nexthdr == IPPROTO_UDP || - iph->nexthdr == IPPROTO_SCTP || - iph->nexthdr == IPPROTO_DCCP || iph->nexthdr == IPPROTO_ESP) h2 ^= *(u32*)&iph[1]; break; diff --git a/trunk/sound/sparc/cs4231.c b/trunk/sound/sparc/cs4231.c index fd6543998788..e9086e95a31f 100644 --- a/trunk/sound/sparc/cs4231.c +++ b/trunk/sound/sparc/cs4231.c @@ -69,14 +69,13 @@ struct sbus_dma_info { }; #endif -struct snd_cs4231; struct cs4231_dma_control { void (*prepare)(struct cs4231_dma_control *dma_cont, int dir); void (*enable)(struct cs4231_dma_control *dma_cont, int on); int (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len); unsigned int (*address)(struct cs4231_dma_control *dma_cont); void (*reset)(struct snd_cs4231 *chip); - void (*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm); + void (*preallocate)(struct snd_cs4231 *chip, struct snd_snd_pcm *pcm); #ifdef EBUS_SUPPORT struct ebus_dma_info ebus_info; #endif