diff --git a/[refs] b/[refs] index 99e5d0aa75f6..0a22408d8319 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 70c03b49b80ba3634958acc31853771019c0ebd3 +refs/heads/master: edd4b53e03049f6fc2f46397b23e412cfe720a4e diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index 5378511a5f9f..5b3f31faed56 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -312,15 +312,3 @@ When: 2.6.26 Why: Implementation became generic; users should now include linux/semaphore.h instead. Who: Matthew Wilcox - ---------------------------- - -What: SCTP_GET_PEER_ADDRS_NUM_OLD, SCTP_GET_PEER_ADDRS_OLD, - SCTP_GET_LOCAL_ADDRS_NUM_OLD, SCTP_GET_LOCAL_ADDRS_OLD -When: June 2009 -Why: A newer version of the options have been introduced in 2005 that - removes the limitions of the old API. The sctp library has been - converted to use these new options at the same time. Any user - space app that directly uses the old options should convert to using - the new options. -Who: Vlad Yasevich diff --git a/trunk/Documentation/networking/ip-sysctl.txt b/trunk/Documentation/networking/ip-sysctl.txt index 72f6d52e52e6..17a6e46fbd43 100644 --- a/trunk/Documentation/networking/ip-sysctl.txt +++ b/trunk/Documentation/networking/ip-sysctl.txt @@ -548,9 +548,8 @@ icmp_echo_ignore_broadcasts - BOOLEAN icmp_ratelimit - INTEGER Limit the maximal rates for sending ICMP packets whose type matches icmp_ratemask (see below) to specific targets. - 0 to disable any limiting, - otherwise the minimal space between responses in milliseconds. - Default: 1000 + 0 to disable any limiting, otherwise the maximal rate in jiffies(1) + Default: 100 icmp_ratemask - INTEGER Mask made of ICMP types for which rates are being limited. @@ -1025,23 +1024,11 @@ max_addresses - INTEGER autoconfigured addresses. Default: 16 -disable_ipv6 - BOOLEAN - Disable IPv6 operation. - Default: FALSE (enable IPv6 operation) - -accept_dad - INTEGER - Whether to accept DAD (Duplicate Address Detection). - 0: Disable DAD - 1: Enable DAD (default) - 2: Enable DAD, and disable IPv6 operation if MAC-based duplicate - link-local address has been found. - icmp/*: ratelimit - INTEGER Limit the maximal rates for sending ICMPv6 packets. - 0 to disable any limiting, - otherwise the minimal space between responses in milliseconds. 
- Default: 1000 + 0 to disable any limiting, otherwise the maximal rate in jiffies(1) + Default: 100 IPv6 Update by: diff --git a/trunk/drivers/char/pcmcia/synclink_cs.c b/trunk/drivers/char/pcmcia/synclink_cs.c index fb2fb159faa3..1dd0e992c83d 100644 --- a/trunk/drivers/char/pcmcia/synclink_cs.c +++ b/trunk/drivers/char/pcmcia/synclink_cs.c @@ -3886,8 +3886,9 @@ static bool rx_get_frame(MGSLPC_INFO *info) framesize = 0; #if SYNCLINK_GENERIC_HDLC { - info->netdev->stats.rx_errors++; - info->netdev->stats.rx_frame_errors++; + struct net_device_stats *stats = hdlc_stats(info->netdev); + stats->rx_errors++; + stats->rx_frame_errors++; } #endif } else @@ -4143,6 +4144,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) { MGSLPC_INFO *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) @@ -4157,8 +4159,8 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) info->tx_put = info->tx_count = skb->len; /* update network statistics */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + stats->tx_packets++; + stats->tx_bytes += skb->len; /* done with socket buffer, so free it */ dev_kfree_skb(skb); @@ -4374,13 +4376,14 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void hdlcdev_tx_timeout(struct net_device *dev) { MGSLPC_INFO *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_tx_timeout(%s)\n",dev->name); - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; + stats->tx_errors++; + stats->tx_aborted_errors++; spin_lock_irqsave(&info->lock,flags); tx_stop(info); @@ -4413,26 +4416,27 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size) { struct sk_buff *skb = dev_alloc_skb(size); struct net_device *dev = info->netdev; + struct net_device_stats *stats = hdlc_stats(dev); if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_rx(%s)\n",dev->name); if (skb == NULL) { printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); - dev->stats.rx_dropped++; + stats->rx_dropped++; return; } - memcpy(skb_put(skb, size), buf, size); + memcpy(skb_put(skb, size),buf,size); - skb->protocol = hdlc_type_trans(skb, dev); + skb->protocol = hdlc_type_trans(skb, info->netdev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += size; + stats->rx_packets++; + stats->rx_bytes += size; netif_rx(skb); - dev->last_rx = jiffies; + info->netdev->last_rx = jiffies; } /** diff --git a/trunk/drivers/char/synclink.c b/trunk/drivers/char/synclink.c index 9d247d8a87a3..ac5080df2565 100644 --- a/trunk/drivers/char/synclink.c +++ b/trunk/drivers/char/synclink.c @@ -6640,8 +6640,9 @@ static bool mgsl_get_rx_frame(struct mgsl_struct *info) framesize = 0; #if SYNCLINK_GENERIC_HDLC { - info->netdev->stats.rx_errors++; - info->netdev->stats.rx_frame_errors++; + struct net_device_stats *stats = hdlc_stats(info->netdev); + stats->rx_errors++; + stats->rx_frame_errors++; } #endif } else @@ -7752,6 +7753,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct mgsl_struct *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) @@ -7765,8 +7767,8 @@ static int hdlcdev_xmit(struct sk_buff *skb, 
struct net_device *dev) mgsl_load_tx_dma_buffer(info, skb->data, skb->len); /* update network statistics */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + stats->tx_packets++; + stats->tx_bytes += skb->len; /* done with socket buffer, so free it */ dev_kfree_skb(skb); @@ -7982,13 +7984,14 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void hdlcdev_tx_timeout(struct net_device *dev) { struct mgsl_struct *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_tx_timeout(%s)\n",dev->name); - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; + stats->tx_errors++; + stats->tx_aborted_errors++; spin_lock_irqsave(&info->irq_spinlock,flags); usc_stop_transmitter(info); @@ -8021,27 +8024,27 @@ static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size) { struct sk_buff *skb = dev_alloc_skb(size); struct net_device *dev = info->netdev; + struct net_device_stats *stats = hdlc_stats(dev); if (debug_level >= DEBUG_LEVEL_INFO) - printk("hdlcdev_rx(%s)\n", dev->name); + printk("hdlcdev_rx(%s)\n",dev->name); if (skb == NULL) { - printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", - dev->name); - dev->stats.rx_dropped++; + printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); + stats->rx_dropped++; return; } - memcpy(skb_put(skb, size), buf, size); + memcpy(skb_put(skb, size),buf,size); - skb->protocol = hdlc_type_trans(skb, dev); + skb->protocol = hdlc_type_trans(skb, info->netdev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += size; + stats->rx_packets++; + stats->rx_bytes += size; netif_rx(skb); - dev->last_rx = jiffies; + info->netdev->last_rx = jiffies; } /** diff --git a/trunk/drivers/char/synclink_gt.c b/trunk/drivers/char/synclink_gt.c index d88a607e34b7..55c1653be00c 100644 --- a/trunk/drivers/char/synclink_gt.c +++ b/trunk/drivers/char/synclink_gt.c @@ -1544,6 +1544,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct slgt_info *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; DBGINFO(("%s hdlc_xmit\n", dev->name)); @@ -1556,8 +1557,8 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) tx_load(info, skb->data, skb->len); /* update network statistics */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + stats->tx_packets++; + stats->tx_bytes += skb->len; /* done with socket buffer, so free it */ dev_kfree_skb(skb); @@ -1774,12 +1775,13 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void hdlcdev_tx_timeout(struct net_device *dev) { struct slgt_info *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name)); - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; + stats->tx_errors++; + stats->tx_aborted_errors++; spin_lock_irqsave(&info->lock,flags); tx_stop(info); @@ -1812,25 +1814,26 @@ static void hdlcdev_rx(struct slgt_info *info, char *buf, int size) { struct sk_buff *skb = dev_alloc_skb(size); struct net_device *dev = info->netdev; + struct net_device_stats *stats = hdlc_stats(dev); DBGINFO(("%s hdlcdev_rx\n", dev->name)); if (skb == NULL) { DBGERR(("%s: can't alloc skb, drop packet\n", dev->name)); - dev->stats.rx_dropped++; + stats->rx_dropped++; return; } - 
memcpy(skb_put(skb, size), buf, size); + memcpy(skb_put(skb, size),buf,size); - skb->protocol = hdlc_type_trans(skb, dev); + skb->protocol = hdlc_type_trans(skb, info->netdev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += size; + stats->rx_packets++; + stats->rx_bytes += size; netif_rx(skb); - dev->last_rx = jiffies; + info->netdev->last_rx = jiffies; } /** @@ -4574,8 +4577,9 @@ static bool rx_get_frame(struct slgt_info *info) #if SYNCLINK_GENERIC_HDLC if (framesize == 0) { - info->netdev->stats.rx_errors++; - info->netdev->stats.rx_frame_errors++; + struct net_device_stats *stats = hdlc_stats(info->netdev); + stats->rx_errors++; + stats->rx_frame_errors++; } #endif diff --git a/trunk/drivers/char/synclinkmp.c b/trunk/drivers/char/synclinkmp.c index 10241ed86100..bec54866e0bb 100644 --- a/trunk/drivers/char/synclinkmp.c +++ b/trunk/drivers/char/synclinkmp.c @@ -1678,6 +1678,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) { SLMP_INFO *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) @@ -1691,8 +1692,8 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) tx_load_dma_buffer(info, skb->data, skb->len); /* update network statistics */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + stats->tx_packets++; + stats->tx_bytes += skb->len; /* done with socket buffer, so free it */ dev_kfree_skb(skb); @@ -1908,13 +1909,14 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void hdlcdev_tx_timeout(struct net_device *dev) { SLMP_INFO *info = dev_to_port(dev); + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_tx_timeout(%s)\n",dev->name); - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; + stats->tx_errors++; + stats->tx_aborted_errors++; spin_lock_irqsave(&info->lock,flags); tx_stop(info); @@ -1947,27 +1949,27 @@ static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size) { struct sk_buff *skb = dev_alloc_skb(size); struct net_device *dev = info->netdev; + struct net_device_stats *stats = hdlc_stats(dev); if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_rx(%s)\n",dev->name); if (skb == NULL) { - printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", - dev->name); - dev->stats.rx_dropped++; + printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); + stats->rx_dropped++; return; } - memcpy(skb_put(skb, size), buf, size); + memcpy(skb_put(skb, size),buf,size); - skb->protocol = hdlc_type_trans(skb, dev); + skb->protocol = hdlc_type_trans(skb, info->netdev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += size; + stats->rx_packets++; + stats->rx_bytes += size; netif_rx(skb); - dev->last_rx = jiffies; + info->netdev->last_rx = jiffies; } /** @@ -4981,8 +4983,9 @@ static bool rx_get_frame(SLMP_INFO *info) framesize = 0; #if SYNCLINK_GENERIC_HDLC { - info->netdev->stats.rx_errors++; - info->netdev->stats.rx_frame_errors++; + struct net_device_stats *stats = hdlc_stats(info->netdev); + stats->rx_errors++; + stats->rx_frame_errors++; } #endif } diff --git a/trunk/drivers/net/3c503.c b/trunk/drivers/net/3c503.c index 900b0ffdcc68..9c23336750e2 100644 --- a/trunk/drivers/net/3c503.c +++ b/trunk/drivers/net/3c503.c @@ -149,7 +149,7 @@ el2_pio_probe(struct net_device *dev) #ifndef MODULE struct net_device * __init el2_probe(int unit) { - 
struct net_device *dev = alloc_eip_netdev(); + struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) @@ -340,7 +340,7 @@ el2_probe1(struct net_device *dev, int ioaddr) dev->stop = &el2_close; dev->ethtool_ops = &netdev_ethtool_ops; #ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = eip_poll; + dev->poll_controller = ei_poll; #endif retval = register_netdev(dev); @@ -386,7 +386,7 @@ el2_open(struct net_device *dev) outb_p(0x00, E33G_IDCFR); if (*irqp == probe_irq_off(cookie) /* It's a good IRQ line! */ && ((retval = request_irq(dev->irq = *irqp, - eip_interrupt, 0, dev->name, dev)) == 0)) + ei_interrupt, 0, dev->name, dev)) == 0)) break; } } while (*++irqp); @@ -395,13 +395,13 @@ el2_open(struct net_device *dev) return retval; } } else { - if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) { + if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) { return retval; } } el2_init_card(dev); - eip_open(dev); + ei_open(dev); return 0; } @@ -412,7 +412,7 @@ el2_close(struct net_device *dev) dev->irq = ei_status.saved_irq; outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ - eip_close(dev); + ei_close(dev); return 0; } @@ -698,7 +698,7 @@ init_module(void) if (this_dev != 0) break; /* only autoprobe 1st one */ printk(KERN_NOTICE "3c503.c: Presently autoprobing (not recommended) for a single card.\n"); } - dev = alloc_eip_netdev(); + dev = alloc_ei_netdev(); if (!dev) break; dev->irq = irq[this_dev]; diff --git a/trunk/drivers/net/8390.h b/trunk/drivers/net/8390.h index 8e209f5e7c11..cf020d45aea6 100644 --- a/trunk/drivers/net/8390.h +++ b/trunk/drivers/net/8390.h @@ -30,10 +30,8 @@ extern int ei_debug; #ifdef CONFIG_NET_POLL_CONTROLLER extern void ei_poll(struct net_device *dev); -extern void eip_poll(struct net_device *dev); #endif -/* Without I/O delay - non ISA or later chips */ extern void NS8390_init(struct net_device *dev, int startp); extern int ei_open(struct net_device *dev); extern int ei_close(struct net_device *dev); @@ -44,17 +42,6 @@ static inline struct net_device *alloc_ei_netdev(void) return __alloc_ei_netdev(0); } -/* With I/O delay form */ -extern void NS8390p_init(struct net_device *dev, int startp); -extern int eip_open(struct net_device *dev); -extern int eip_close(struct net_device *dev); -extern irqreturn_t eip_interrupt(int irq, void *dev_id); -extern struct net_device *__alloc_eip_netdev(int size); -static inline struct net_device *alloc_eip_netdev(void) -{ - return __alloc_eip_netdev(0); -} - /* You have one of these per-board */ struct ei_device { const char *name; @@ -128,14 +115,13 @@ struct ei_device { /* * Only generate indirect loads given a machine that needs them. * - removed AMIGA_PCMCIA from this list, handled as ISA io now - * - the _p for generates no delay by default 8390p.c overrides this. 
*/ #ifndef ei_inb #define ei_inb(_p) inb(_p) #define ei_outb(_v,_p) outb(_v,_p) -#define ei_inb_p(_p) inb(_p) -#define ei_outb_p(_v,_p) outb(_v,_p) +#define ei_inb_p(_p) inb_p(_p) +#define ei_outb_p(_v,_p) outb_p(_v,_p) #endif #ifndef EI_SHIFT diff --git a/trunk/drivers/net/8390p.c b/trunk/drivers/net/8390p.c deleted file mode 100644 index 71f19884c4b1..000000000000 --- a/trunk/drivers/net/8390p.c +++ /dev/null @@ -1,66 +0,0 @@ -/* 8390 core for ISA devices needing bus delays */ - -static const char version[] = - "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; - -#define ei_inb(_p) inb(_p) -#define ei_outb(_v,_p) outb(_v,_p) -#define ei_inb_p(_p) inb_p(_p) -#define ei_outb_p(_v,_p) outb_p(_v,_p) - -#include "lib8390.c" - -int eip_open(struct net_device *dev) -{ - return __ei_open(dev); -} - -int eip_close(struct net_device *dev) -{ - return __ei_close(dev); -} - -irqreturn_t eip_interrupt(int irq, void *dev_id) -{ - return __ei_interrupt(irq, dev_id); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -void eip_poll(struct net_device *dev) -{ - __ei_poll(dev); -} -#endif - -struct net_device *__alloc_eip_netdev(int size) -{ - return ____alloc_ei_netdev(size); -} - -void NS8390p_init(struct net_device *dev, int startp) -{ - return __NS8390_init(dev, startp); -} - -EXPORT_SYMBOL(eip_open); -EXPORT_SYMBOL(eip_close); -EXPORT_SYMBOL(eip_interrupt); -#ifdef CONFIG_NET_POLL_CONTROLLER -EXPORT_SYMBOL(eip_poll); -#endif -EXPORT_SYMBOL(NS8390p_init); -EXPORT_SYMBOL(__alloc_eip_netdev); - -#if defined(MODULE) - -int init_module(void) -{ - return 0; -} - -void cleanup_module(void) -{ -} - -#endif /* MODULE */ -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig index 84925915dce4..d85b9d067597 100644 --- a/trunk/drivers/net/Kconfig +++ b/trunk/drivers/net/Kconfig @@ -2122,13 +2122,27 @@ config R8169 To compile this driver as a module, choose M here: the module will be called r8169. This is recommended. +config R8169_NAPI + bool "Use Rx Polling (NAPI) (EXPERIMENTAL)" + depends on R8169 && EXPERIMENTAL + help + NAPI is a new driver API designed to reduce CPU and interrupt load + when the driver is receiving lots of packets from the card. It is + still somewhat experimental and thus not yet enabled by default. + + If your estimated Rx load is 10kpps or more, or if the card will be + deployed on potentially unfriendly networks (e.g. in a firewall), + then say Y here. + + If in doubt, say N. + config R8169_VLAN bool "VLAN support" depends on R8169 && VLAN_8021Q ---help--- Say Y here for the r8169 driver to support the functions required by the kernel 802.1Q code. - + If in doubt, say Y. 
config SB1250_MAC diff --git a/trunk/drivers/net/Makefile b/trunk/drivers/net/Makefile index 4beb043e09e6..87703ffd4c1e 100644 --- a/trunk/drivers/net/Makefile +++ b/trunk/drivers/net/Makefile @@ -106,11 +106,11 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) endif obj-$(CONFIG_68360_ENET) += 68360enet.o obj-$(CONFIG_WD80x3) += wd.o 8390.o -obj-$(CONFIG_EL2) += 3c503.o 8390p.o -obj-$(CONFIG_NE2000) += ne.o 8390p.o -obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o -obj-$(CONFIG_HPLAN) += hp.o 8390p.o -obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o +obj-$(CONFIG_EL2) += 3c503.o 8390.o +obj-$(CONFIG_NE2000) += ne.o 8390.o +obj-$(CONFIG_NE2_MCA) += ne2.o 8390.o +obj-$(CONFIG_HPLAN) += hp.o 8390.o +obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o diff --git a/trunk/drivers/net/hp.c b/trunk/drivers/net/hp.c index 8281209ededf..c649a8019beb 100644 --- a/trunk/drivers/net/hp.c +++ b/trunk/drivers/net/hp.c @@ -103,7 +103,7 @@ static int __init do_hp_probe(struct net_device *dev) #ifndef MODULE struct net_device * __init hp_probe(int unit) { - struct net_device *dev = alloc_eip_netdev(); + struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) @@ -176,7 +176,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE); outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE); if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */ - && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) { + && request_irq (irq, ei_interrupt, 0, DRV_NAME, dev) == 0) { printk(" selecting IRQ %d.\n", irq); dev->irq = *irqp; break; @@ -191,7 +191,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) } else { if (dev->irq == 2) dev->irq = 9; - if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) { + if ((retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) { printk (" unable to get IRQ %d.\n", dev->irq); goto out; } @@ -202,7 +202,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) dev->open = &hp_open; dev->stop = &hp_close; #ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = eip_poll; + dev->poll_controller = ei_poll; #endif ei_status.name = name; @@ -231,14 +231,14 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) static int hp_open(struct net_device *dev) { - eip_open(dev); + ei_open(dev); return 0; } static int hp_close(struct net_device *dev) { - eip_close(dev); + ei_close(dev); return 0; } @@ -421,7 +421,7 @@ init_module(void) if (this_dev != 0) break; /* only autoprobe 1st one */ printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n"); } - dev = alloc_eip_netdev(); + dev = alloc_ei_netdev(); if (!dev) break; dev->irq = irq[this_dev]; diff --git a/trunk/drivers/net/igb/e1000_82575.c b/trunk/drivers/net/igb/e1000_82575.c index 2c8b91060d98..cda3ec879090 100644 --- a/trunk/drivers/net/igb/e1000_82575.c +++ b/trunk/drivers/net/igb/e1000_82575.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007 - 2008 Intel Corporation. + Copyright(c) 2007 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -171,10 +171,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) * for setting word_size. 
*/ size += NVM_WORD_SIZE_BASE_SHIFT; - - /* EEPROM access above 16k is unsupported */ - if (size > 14) - size = 14; nvm->word_size = 1 << size; /* setup PHY parameters */ @@ -226,7 +222,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) } /** - * igb_acquire_phy_82575 - Acquire rights to access PHY + * e1000_acquire_phy_82575 - Acquire rights to access PHY * @hw: pointer to the HW structure * * Acquire access rights to the correct PHY. This is a @@ -242,7 +238,7 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw) } /** - * igb_release_phy_82575 - Release rights to access PHY + * e1000_release_phy_82575 - Release rights to access PHY * @hw: pointer to the HW structure * * A wrapper to release access rights to the correct PHY. This is a @@ -257,7 +253,7 @@ static void igb_release_phy_82575(struct e1000_hw *hw) } /** - * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data @@ -272,7 +268,7 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u32 i, i2ccmd = 0; if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { - hw_dbg("PHY Address %u is out of range\n", offset); + hw_dbg(hw, "PHY Address %u is out of range\n", offset); return -E1000_ERR_PARAM; } @@ -295,11 +291,11 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, break; } if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Read did not complete\n"); + hw_dbg(hw, "I2CCMD Read did not complete\n"); return -E1000_ERR_PHY; } if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); + hw_dbg(hw, "I2CCMD Error bit set\n"); return -E1000_ERR_PHY; } @@ -310,7 +306,7 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, } /** - * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset @@ -326,7 +322,7 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 phy_data_swapped; if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { - hw_dbg("PHY Address %d is out of range\n", offset); + hw_dbg(hw, "PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } @@ -353,11 +349,11 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, break; } if (!(i2ccmd & E1000_I2CCMD_READY)) { - hw_dbg("I2CCMD Write did not complete\n"); + hw_dbg(hw, "I2CCMD Write did not complete\n"); return -E1000_ERR_PHY; } if (i2ccmd & E1000_I2CCMD_ERROR) { - hw_dbg("I2CCMD Error bit set\n"); + hw_dbg(hw, "I2CCMD Error bit set\n"); return -E1000_ERR_PHY; } @@ -365,10 +361,10 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, } /** - * igb_get_phy_id_82575 - Retrieve PHY addr and id + * e1000_get_phy_id_82575 - Retreive PHY addr and id * @hw: pointer to the HW structure * - * Retrieves the PHY address and ID for both PHY's which do and do not use + * Retreives the PHY address and ID for both PHY's which do and do not use * sgmi interface. 
**/ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) @@ -397,8 +393,9 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) for (phy->addr = 1; phy->addr < 8; phy->addr++) { ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); if (ret_val == 0) { - hw_dbg("Vendor ID 0x%08X read at address %u\n", - phy_id, phy->addr); + hw_dbg(hw, "Vendor ID 0x%08X read at address %u\n", + phy_id, + phy->addr); /* * At the time of this writing, The M88 part is * the only supported SGMII PHY product. @@ -406,7 +403,8 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) if (phy_id == M88_VENDOR) break; } else { - hw_dbg("PHY address %u was unreadable\n", phy->addr); + hw_dbg(hw, "PHY address %u was unreadable\n", + phy->addr); } } @@ -424,7 +422,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw) } /** - * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset * @hw: pointer to the HW structure * * Resets the PHY using the serial gigabit media independent interface. @@ -438,7 +436,7 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) * available to us at this time. */ - hw_dbg("Soft resetting SGMII attached PHY...\n"); + hw_dbg(hw, "Soft resetting SGMII attached PHY...\n"); /* * SFP documentation requires the following to configure the SPF module @@ -455,7 +453,7 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) } /** - * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: true to enable LPLU, false to disable * @@ -473,29 +471,34 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) s32 ret_val; u16 data; - ret_val = phy->ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + ret_val = hw->phy.ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, + &data); if (ret_val) goto out; if (active) { data |= IGP02E1000_PM_D0_LPLU; - ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + ret_val = hw->phy.ops.write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); if (ret_val) goto out; /* When LPLU is enabled, we should disable SmartSpeed */ - ret_val = phy->ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &data); + ret_val = hw->phy.ops.read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - data); + ret_val = hw->phy.ops.write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) goto out; } else { data &= ~IGP02E1000_PM_D0_LPLU; - ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, - data); + ret_val = hw->phy.ops.write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most @@ -503,25 +506,29 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) * SmartSpeed, so performance is maintained. 
*/ if (phy->smart_speed == e1000_smart_speed_on) { - ret_val = phy->ops.read_phy_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, &data); + ret_val = hw->phy.ops.read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) goto out; data |= IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_phy_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, data); + ret_val = hw->phy.ops.write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) goto out; } else if (phy->smart_speed == e1000_smart_speed_off) { - ret_val = phy->ops.read_phy_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, &data); + ret_val = hw->phy.ops.read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = phy->ops.write_phy_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, data); + ret_val = hw->phy.ops.write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); if (ret_val) goto out; } @@ -532,10 +539,10 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) } /** - * igb_acquire_nvm_82575 - Request for access to EEPROM + * e1000_acquire_nvm_82575 - Request for access to EEPROM * @hw: pointer to the HW structure * - * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Acquire the necessary semaphores for exclussive access to the EEPROM. * Set the EEPROM access request bit and wait for EEPROM access grant bit. * Return successful if access grant bit set, else clear the request for * EEPROM access and return -E1000_ERR_NVM (-1). @@ -558,7 +565,7 @@ static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) } /** - * igb_release_nvm_82575 - Release exclusive access to EEPROM + * e1000_release_nvm_82575 - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit, @@ -571,7 +578,7 @@ static void igb_release_nvm_82575(struct e1000_hw *hw) } /** - * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * @@ -606,7 +613,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) } if (i == timeout) { - hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + hw_dbg(hw, "Can't access resource, SW_FW_SYNC timeout.\n"); ret_val = -E1000_ERR_SWFW_SYNC; goto out; } @@ -621,7 +628,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) } /** - * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * e1000_release_swfw_sync_82575 - Release SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * @@ -643,7 +650,7 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) } /** - * igb_get_cfg_done_82575 - Read config done bit + * e1000_get_cfg_done_82575 - Read config done bit * @hw: pointer to the HW structure * * Read the management control register for the config done bit for @@ -668,7 +675,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) timeout--; } if (!timeout) - hw_dbg("MNG configuration cycle has not completed.\n"); + hw_dbg(hw, "MNG configuration cycle has not completed.\n"); /* If EEPROM is not marked present, init the PHY manually */ if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && @@ -679,7 +686,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) } /** - * igb_check_for_link_82575 - Check for link + * e1000_check_for_link_82575 - Check for link * @hw: pointer to the HW structure * * If sgmii is 
enabled, then use the pcs register to determine link, otherwise @@ -702,12 +709,12 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) } /** - * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex * - * Using the physical coding sub-layer (PCS), retrieve the current speed and + * Using the physical coding sub-layer (PCS), retreive the current speed and * duplex, then store the values in the pointers provided. **/ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, @@ -757,7 +764,7 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, } /** - * igb_rar_set_82575 - Set receive address register + * e1000_rar_set_82575 - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register @@ -774,7 +781,7 @@ static void igb_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index) } /** - * igb_reset_hw_82575 - Reset hardware + * e1000_reset_hw_82575 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. This is a @@ -791,9 +798,9 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw) */ ret_val = igb_disable_pcie_master(hw); if (ret_val) - hw_dbg("PCI-E Master disable polling has failed.\n"); + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); - hw_dbg("Masking off all interrupts\n"); + hw_dbg(hw, "Masking off all interrupts\n"); wr32(E1000_IMC, 0xffffffff); wr32(E1000_RCTL, 0); @@ -804,7 +811,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw) ctrl = rd32(E1000_CTRL); - hw_dbg("Issuing a global reset to MAC\n"); + hw_dbg(hw, "Issuing a global reset to MAC\n"); wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); ret_val = igb_get_auto_rd_done(hw); @@ -814,7 +821,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw) * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ - hw_dbg("Auto Read Done did not complete\n"); + hw_dbg(hw, "Auto Read Done did not complete\n"); } /* If EEPROM is not present, run manual init scripts */ @@ -831,7 +838,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw) } /** - * igb_init_hw_82575 - Initialize hardware + * e1000_init_hw_82575 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. 
@@ -845,18 +852,18 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = igb_id_led_init(hw); if (ret_val) { - hw_dbg("Error initializing identification LED\n"); + hw_dbg(hw, "Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ } /* Disabling VLAN filtering */ - hw_dbg("Initializing the IEEE VLAN\n"); + hw_dbg(hw, "Initializing the IEEE VLAN\n"); igb_clear_vfta(hw); /* Setup the receive address */ igb_init_rx_addrs(hw, rar_count); /* Zero out the Multicast HASH table */ - hw_dbg("Zeroing the MTA\n"); + hw_dbg(hw, "Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) array_wr32(E1000_MTA, i, 0); @@ -875,7 +882,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) } /** - * igb_setup_copper_link_82575 - Configure copper link settings + * e1000_setup_copper_link_82575 - Configure copper link settings * @hw: pointer to the HW structure * * Configures the link for auto-neg or forced speed and duplex. Then we check @@ -926,10 +933,10 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) * PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ - hw_dbg("Forcing Speed and Duplex\n"); + hw_dbg(hw, "Forcing Speed and Duplex\n"); ret_val = igb_phy_force_speed_duplex(hw); if (ret_val) { - hw_dbg("Error Forcing Speed and Duplex\n"); + hw_dbg(hw, "Error Forcing Speed and Duplex\n"); goto out; } } @@ -942,17 +949,20 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) * Check link status. Wait up to 100 microseconds for link to become * valid. */ - ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + ret_val = igb_phy_has_link(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); if (ret_val) goto out; if (link) { - hw_dbg("Valid link established!!!\n"); + hw_dbg(hw, "Valid link established!!!\n"); /* Config the MAC and PHY after link is up */ igb_config_collision_dist(hw); ret_val = igb_config_fc_after_link_up(hw); } else { - hw_dbg("Unable to establish link!!!\n"); + hw_dbg(hw, "Unable to establish link!!!\n"); } out: @@ -960,7 +970,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) } /** - * igb_setup_fiber_serdes_link_82575 - Setup link for fiber/serdes + * e1000_setup_fiber_serdes_link_82575 - Setup link for fiber/serdes * @hw: pointer to the HW structure * * Configures speed and duplex for fiber and serdes links. @@ -1008,7 +1018,7 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw) E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ - hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); + hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); } else { /* Set PCS register for forced speed */ reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ @@ -1016,7 +1026,7 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw) E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ E1000_PCS_LCTL_FSD | /* Force Speed */ E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ - hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); + hw_dbg(hw, "Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); } wr32(E1000_PCS_LCTL, reg); @@ -1024,7 +1034,7 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw) } /** - * igb_configure_pcs_link_82575 - Configure PCS link + * e1000_configure_pcs_link_82575 - Configure PCS link * @hw: pointer to the HW structure * * Configure the physical coding sub-layer (PCS) link. 
The PCS link is @@ -1057,7 +1067,7 @@ static s32 igb_configure_pcs_link_82575(struct e1000_hw *hw) */ reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE; } else { - /* Set PCS register for forced speed */ + /* Set PCS regiseter for forced speed */ /* Turn off bits for full duplex, speed, and autoneg */ reg &= ~(E1000_PCS_LCTL_FSV_1000 | @@ -1078,7 +1088,8 @@ static s32 igb_configure_pcs_link_82575(struct e1000_hw *hw) E1000_PCS_LCTL_FORCE_LINK | E1000_PCS_LCTL_FLV_LINK_UP; - hw_dbg("Wrote 0x%08X to PCS_LCTL to configure forced link\n", + hw_dbg(hw, + "Wrote 0x%08X to PCS_LCTL to configure forced link\n", reg); } wr32(E1000_PCS_LCTL, reg); @@ -1088,7 +1099,7 @@ static s32 igb_configure_pcs_link_82575(struct e1000_hw *hw) } /** - * igb_sgmii_active_82575 - Return sgmii state + * e1000_sgmii_active_82575 - Return sgmii state * @hw: pointer to the HW structure * * 82575 silicon has a serialized gigabit media independent interface (sgmii) @@ -1114,7 +1125,7 @@ static bool igb_sgmii_active_82575(struct e1000_hw *hw) } /** - * igb_reset_init_script_82575 - Inits HW defaults after reset + * e1000_reset_init_script_82575 - Inits HW defaults after reset * @hw: pointer to the HW structure * * Inits recommended HW defaults after a reset when there is no EEPROM @@ -1123,7 +1134,7 @@ static bool igb_sgmii_active_82575(struct e1000_hw *hw) static s32 igb_reset_init_script_82575(struct e1000_hw *hw) { if (hw->mac.type == e1000_82575) { - hw_dbg("Running reset init script for 82575\n"); + hw_dbg(hw, "Running reset init script for 82575\n"); /* SerDes configuration via SERDESCTRL */ igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); @@ -1150,7 +1161,7 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *hw) } /** - * igb_read_mac_addr_82575 - Read device MAC address + * e1000_read_mac_addr_82575 - Read device MAC address * @hw: pointer to the HW structure **/ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) @@ -1164,7 +1175,7 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) } /** - * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. @@ -1227,79 +1238,6 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) temp = rd32(E1000_SCVPC); } -/** - * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable - * @hw: pointer to the HW structure - * - * After rx enable if managability is enabled then there is likely some - * bad data at the start of the fifo and possibly in the DMA fifo. This - * function clears the fifos and flushes any packets that came in as rx was - * being enabled. 
- **/ -void igb_rx_fifo_flush_82575(struct e1000_hw *hw) -{ - u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; - int i, ms_wait; - - if (hw->mac.type != e1000_82575 || - !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) - return; - - /* Disable all RX queues */ - for (i = 0; i < 4; i++) { - rxdctl[i] = rd32(E1000_RXDCTL(i)); - wr32(E1000_RXDCTL(i), - rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); - } - /* Poll all queues to verify they have shut down */ - for (ms_wait = 0; ms_wait < 10; ms_wait++) { - msleep(1); - rx_enabled = 0; - for (i = 0; i < 4; i++) - rx_enabled |= rd32(E1000_RXDCTL(i)); - if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) - break; - } - - if (ms_wait == 10) - hw_dbg("Queue disable timed out after 10ms\n"); - - /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all - * incoming packets are rejected. Set enable and wait 2ms so that - * any packet that was coming in as RCTL.EN was set is flushed - */ - rfctl = rd32(E1000_RFCTL); - wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); - - rlpml = rd32(E1000_RLPML); - wr32(E1000_RLPML, 0); - - rctl = rd32(E1000_RCTL); - temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); - temp_rctl |= E1000_RCTL_LPE; - - wr32(E1000_RCTL, temp_rctl); - wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); - wrfl(); - msleep(2); - - /* Enable RX queues that were previously enabled and restore our - * previous state - */ - for (i = 0; i < 4; i++) - wr32(E1000_RXDCTL(i), rxdctl[i]); - wr32(E1000_RCTL, rctl); - wrfl(); - - wr32(E1000_RLPML, rlpml); - wr32(E1000_RFCTL, rfctl); - - /* Flush receive errors generated by workaround */ - rd32(E1000_ROC); - rd32(E1000_RNBC); - rd32(E1000_MPC); -} - static struct e1000_mac_operations e1000_mac_ops_82575 = { .reset_hw = igb_reset_hw_82575, .init_hw = igb_init_hw_82575, diff --git a/trunk/drivers/net/igb/e1000_82575.h b/trunk/drivers/net/igb/e1000_82575.h index d78ad33d32bf..76ea846663db 100644 --- a/trunk/drivers/net/igb/e1000_82575.h +++ b/trunk/drivers/net/igb/e1000_82575.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007 - 2008 Intel Corporation. + Copyright(c) 2007 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,8 +28,6 @@ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ -extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); - #define E1000_RAR_ENTRIES_82575 16 /* SRRCTL bit definitions */ @@ -58,7 +56,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); #define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE #define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE -/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { @@ -147,6 +145,6 @@ struct e1000_adv_tx_context_desc { -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ #endif diff --git a/trunk/drivers/net/igb/e1000_defines.h b/trunk/drivers/net/igb/e1000_defines.h index ed748dcfb7a4..8da9ffedc425 100644 --- a/trunk/drivers/net/igb/e1000_defines.h +++ b/trunk/drivers/net/igb/e1000_defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007 - 2008 Intel Corporation. + Copyright(c) 2007 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -91,12 +91,12 @@ #define E1000_MAX_SGMII_PHY_REG_ADDR 255 #define E1000_I2CCMD_PHY_TIMEOUT 200 -/* Receive Descriptor bit definitions */ +/* Receive Decriptor bit definitions */ #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ #define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ @@ -340,7 +340,6 @@ #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* Header split receive */ -#define E1000_RFCTL_LEF 0x00040000 /* Collision related configuration parameters */ #define E1000_COLLISION_THRESHOLD 15 @@ -380,7 +379,7 @@ #define E1000_ICR_RXO 0x00000040 /* rx overrun */ #define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ #define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ -#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ #define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ #define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ #define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ @@ -444,6 +443,12 @@ #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +/* queue 0 Rx descriptor FIFO parity error */ +/* queue 0 Tx descriptor FIFO parity error */ +/* host arb read buffer parity error */ +/* packet buffer parity error */ +/* queue 1 Rx descriptor FIFO parity error */ +/* queue 1 Tx descriptor FIFO parity error */ /* Extended Interrupt Mask Set */ #define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ @@ -452,6 +457,12 @@ /* Interrupt Cause Set */ #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +/* queue 0 Rx descriptor FIFO parity error */ +/* queue 0 Tx descriptor FIFO parity error */ +/* host arb read buffer parity error */ +/* packet buffer parity error */ +/* queue 1 Rx descriptor FIFO parity error */ +/* queue 1 Tx descriptor FIFO parity error */ /* Extended Interrupt Cause Set */ @@ -556,6 +567,7 @@ /* 1000BASE-T Control Register */ #define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ #define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + /* 0=DTE device */ #define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ /* 0=Configure PHY as Slave */ #define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ @@ -569,7 +581,7 @@ /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ #define PHY_CONTROL 0x00 /* Control Register */ -#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_STATUS 0x01 /* Status Regiser */ #define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ #define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ #define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ @@ -696,8 +708,8 @@ /* Auto crossover enabled all speeds */ #define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* - * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold - * 0=Normal 10BASE-T Rx Threshold + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold + * 0=Normal 10BASE-T RX Threshold */ /* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ diff --git a/trunk/drivers/net/igb/e1000_hw.h b/trunk/drivers/net/igb/e1000_hw.h index 746c3ea09e27..7b2c70a3b8cc 100644 --- a/trunk/drivers/net/igb/e1000_hw.h +++ b/trunk/drivers/net/igb/e1000_hw.h @@ -586,10 +586,14 @@ struct e1000_hw { #ifdef DEBUG extern char *igb_get_hw_dev_name(struct e1000_hw *hw); -#define hw_dbg(format, arg...) \ +#define hw_dbg(hw, format, arg...) \ printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg) #else -#define hw_dbg(format, arg...) +static inline int __attribute__ ((format (printf, 2, 3))) +hw_dbg(struct e1000_hw *hw, const char *format, ...) 
+{ + return 0; +} #endif #endif diff --git a/trunk/drivers/net/igb/e1000_mac.c b/trunk/drivers/net/igb/e1000_mac.c index 47ad2c4277c3..3e84a3f0c1d8 100644 --- a/trunk/drivers/net/igb/e1000_mac.c +++ b/trunk/drivers/net/igb/e1000_mac.c @@ -39,7 +39,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw); static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); /** - * igb_remove_device - Free device specific structure + * e1000_remove_device - Free device specific structure * @hw: pointer to the HW structure * * If a device specific structure was allocated, this function will @@ -73,7 +73,7 @@ static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) } /** - * igb_get_bus_info_pcie - Get PCIe bus information + * e1000_get_bus_info_pcie - Get PCIe bus information * @hw: pointer to the HW structure * * Determines and stores the system bus information for a particular @@ -113,7 +113,7 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw) } /** - * igb_clear_vfta - Clear VLAN filter table + * e1000_clear_vfta - Clear VLAN filter table * @hw: pointer to the HW structure * * Clears the register array which contains the VLAN filter table by @@ -130,7 +130,7 @@ void igb_clear_vfta(struct e1000_hw *hw) } /** - * igb_write_vfta - Write value to VLAN filter table + * e1000_write_vfta - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: register offset in VLAN filter table * @value: register value written to VLAN filter table @@ -145,7 +145,7 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) } /** - * igb_init_rx_addrs - Initialize receive address's + * e1000_init_rx_addrs - Initialize receive address's * @hw: pointer to the HW structure * @rar_count: receive address registers * @@ -158,12 +158,12 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) u32 i; /* Setup the receive address */ - hw_dbg("Programming MAC Address into RAR[0]\n"); + hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); hw->mac.ops.rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ - hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); for (i = 1; i < rar_count; i++) { array_wr32(E1000_RA, (i << 1), 0); wrfl(); @@ -173,7 +173,7 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) } /** - * igb_check_alt_mac_addr - Check for alternate MAC addr + * e1000_check_alt_mac_addr - Check for alternate MAC addr * @hw: pointer to the HW structure * * Checks the nvm for an alternate MAC address. 
An alternate MAC address @@ -193,7 +193,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, &nvm_alt_mac_addr_offset); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } @@ -209,7 +209,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) offset = nvm_alt_mac_addr_offset + (i >> 1); ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } @@ -233,7 +233,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw) } /** - * igb_rar_set - Set receive address register + * e1000_rar_set - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register @@ -263,7 +263,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) } /** - * igb_mta_set - Set multicast filter table address + * e1000_mta_set - Set multicast filter table address * @hw: pointer to the HW structure * @hash_value: determines the MTA register and bit to set * @@ -298,7 +298,7 @@ static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) } /** - * igb_update_mc_addr_list - Update Multicast addresses + * e1000_update_mc_addr_list - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program @@ -336,7 +336,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, } /* Clear the old settings from the MTA */ - hw_dbg("Clearing MTA\n"); + hw_dbg(hw, "Clearing MTA\n"); for (i = 0; i < hw->mac.mta_reg_count; i++) { array_wr32(E1000_MTA, i, 0); wrfl(); @@ -345,14 +345,14 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, /* Load any remaining multicast addresses into the hash table. */ for (; mc_addr_count > 0; mc_addr_count--) { hash_value = igb_hash_mc_addr(hw, mc_addr_list); - hw_dbg("Hash value = 0x%03X\n", hash_value); + hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); igb_mta_set(hw, hash_value); mc_addr_list += ETH_ALEN; } } /** - * igb_hash_mc_addr - Generate a multicast hash value + * e1000_hash_mc_addr - Generate a multicast hash value * @hw: pointer to the HW structure * @mc_addr: pointer to a multicast address * @@ -423,7 +423,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) } /** - * igb_clear_hw_cntrs_base - Clear base hardware counters + * e1000_clear_hw_cntrs_base - Clear base hardware counters * @hw: pointer to the HW structure * * Clears the base hardware counters by reading the counter registers. @@ -472,7 +472,7 @@ void igb_clear_hw_cntrs_base(struct e1000_hw *hw) } /** - * igb_check_for_copper_link - Check for link (Copper) + * e1000_check_for_copper_link - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see of the link status of the hardware has changed. 
If a @@ -540,14 +540,14 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw) */ ret_val = igb_config_fc_after_link_up(hw); if (ret_val) - hw_dbg("Error configuring flow control\n"); + hw_dbg(hw, "Error configuring flow control\n"); out: return ret_val; } /** - * igb_setup_link - Setup flow control and link settings + * e1000_setup_link - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow @@ -578,7 +578,7 @@ s32 igb_setup_link(struct e1000_hw *hw) */ hw->fc.original_type = hw->fc.type; - hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.type); + hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type); /* Call the necessary media_type subroutine to configure the link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); @@ -591,7 +591,8 @@ s32 igb_setup_link(struct e1000_hw *hw) * control is disabled, because it does not hurt anything to * initialize these registers. */ - hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + hw_dbg(hw, + "Initializing the Flow Control address, type and timer regs\n"); wr32(E1000_FCT, FLOW_CONTROL_TYPE); wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); @@ -605,7 +606,7 @@ s32 igb_setup_link(struct e1000_hw *hw) } /** - * igb_config_collision_dist - Configure collision distance + * e1000_config_collision_dist - Configure collision distance * @hw: pointer to the HW structure * * Configures the collision distance to the default value and is used @@ -626,7 +627,7 @@ void igb_config_collision_dist(struct e1000_hw *hw) } /** - * igb_set_fc_watermarks - Set flow control high/low watermarks + * e1000_set_fc_watermarks - Set flow control high/low watermarks * @hw: pointer to the HW structure * * Sets the flow control high/low threshold (watermark) registers. If @@ -664,7 +665,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw) } /** - * igb_set_default_fc - Set flow control default values + * e1000_set_default_fc - Set flow control default values * @hw: pointer to the HW structure * * Read the EEPROM for the default values for flow control and store the @@ -688,7 +689,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } @@ -705,7 +706,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw) } /** - * igb_force_mac_fc - Force the MAC's flow control settings + * e1000_force_mac_fc - Force the MAC's flow control settings * @hw: pointer to the HW structure * * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the @@ -739,7 +740,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw) * 3: Both Rx and TX flow control (symmetric) is enabled. * other: No other values should be possible at this point. 
*/ - hw_dbg("hw->fc.type = %u\n", hw->fc.type); + hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type); switch (hw->fc.type) { case e1000_fc_none: @@ -757,7 +758,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw) ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); break; default: - hw_dbg("Flow control param set incorrectly\n"); + hw_dbg(hw, "Flow control param set incorrectly\n"); ret_val = -E1000_ERR_CONFIG; goto out; } @@ -769,7 +770,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw) } /** - * igb_config_fc_after_link_up - Configures flow control after link + * e1000_config_fc_after_link_up - Configures flow control after link * @hw: pointer to the HW structure * * Checks the status of auto-negotiation after link up to ensure that the @@ -800,7 +801,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) } if (ret_val) { - hw_dbg("Error forcing flow control settings\n"); + hw_dbg(hw, "Error forcing flow control settings\n"); goto out; } @@ -826,7 +827,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) goto out; if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { - hw_dbg("Copper PHY and Auto Neg " + hw_dbg(hw, "Copper PHY and Auto Neg " "has not completed.\n"); goto out; } @@ -892,11 +893,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) */ if (hw->fc.original_type == e1000_fc_full) { hw->fc.type = e1000_fc_full; - hw_dbg("Flow Control = FULL.\r\n"); + hw_dbg(hw, "Flow Control = FULL.\r\n"); } else { hw->fc.type = e1000_fc_rx_pause; - hw_dbg("Flow Control = " - "RX PAUSE frames only.\r\n"); + hw_dbg(hw, "Flow Control = " + "RX PAUSE frames only.\r\n"); } } /* @@ -912,7 +913,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.type = e1000_fc_tx_pause; - hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); } /* * For transmitting PAUSE frames ONLY. 
@@ -927,7 +928,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.type = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); } /* * Per the IEEE spec, at this point flow control should be @@ -954,10 +955,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) hw->fc.original_type == e1000_fc_tx_pause) || hw->fc.strict_ieee) { hw->fc.type = e1000_fc_none; - hw_dbg("Flow Control = NONE.\r\n"); + hw_dbg(hw, "Flow Control = NONE.\r\n"); } else { hw->fc.type = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); } /* @@ -967,7 +968,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) */ ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); if (ret_val) { - hw_dbg("Error getting link speed and duplex\n"); + hw_dbg(hw, "Error getting link speed and duplex\n"); goto out; } @@ -980,7 +981,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) */ ret_val = igb_force_mac_fc(hw); if (ret_val) { - hw_dbg("Error forcing flow control settings\n"); + hw_dbg(hw, "Error forcing flow control settings\n"); goto out; } } @@ -990,7 +991,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) } /** - * igb_get_speed_and_duplex_copper - Retreive current speed/duplex + * e1000_get_speed_and_duplex_copper - Retreive current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex @@ -1006,28 +1007,28 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, status = rd32(E1000_STATUS); if (status & E1000_STATUS_SPEED_1000) { *speed = SPEED_1000; - hw_dbg("1000 Mbs, "); + hw_dbg(hw, "1000 Mbs, "); } else if (status & E1000_STATUS_SPEED_100) { *speed = SPEED_100; - hw_dbg("100 Mbs, "); + hw_dbg(hw, "100 Mbs, "); } else { *speed = SPEED_10; - hw_dbg("10 Mbs, "); + hw_dbg(hw, "10 Mbs, "); } if (status & E1000_STATUS_FD) { *duplex = FULL_DUPLEX; - hw_dbg("Full Duplex\n"); + hw_dbg(hw, "Full Duplex\n"); } else { *duplex = HALF_DUPLEX; - hw_dbg("Half Duplex\n"); + hw_dbg(hw, "Half Duplex\n"); } return 0; } /** - * igb_get_hw_semaphore - Acquire hardware semaphore + * e1000_get_hw_semaphore - Acquire hardware semaphore * @hw: pointer to the HW structure * * Acquire the HW semaphore to access the PHY or NVM @@ -1050,7 +1051,7 @@ s32 igb_get_hw_semaphore(struct e1000_hw *hw) } if (i == timeout) { - hw_dbg("Driver can't access device - SMBI bit is set.\n"); + hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); ret_val = -E1000_ERR_NVM; goto out; } @@ -1070,7 +1071,7 @@ s32 igb_get_hw_semaphore(struct e1000_hw *hw) if (i == timeout) { /* Release semaphores */ igb_put_hw_semaphore(hw); - hw_dbg("Driver can't access the NVM\n"); + hw_dbg(hw, "Driver can't access the NVM\n"); ret_val = -E1000_ERR_NVM; goto out; } @@ -1080,7 +1081,7 @@ s32 igb_get_hw_semaphore(struct e1000_hw *hw) } /** - * igb_put_hw_semaphore - Release hardware semaphore + * e1000_put_hw_semaphore - Release hardware semaphore * @hw: pointer to the HW structure * * Release hardware semaphore used to access the PHY or NVM @@ -1097,7 +1098,7 @@ void igb_put_hw_semaphore(struct e1000_hw *hw) } /** - * igb_get_auto_rd_done - Check for auto read completion + * e1000_get_auto_rd_done - Check for auto read completion * @hw: pointer to the HW structure * * Check EEPROM for Auto Read done bit. 
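The igb_config_fc_after_link_up() hunks above resolve the operational pause mode from the local advertisement and the link-partner ability words, following the IEEE 802.3 PAUSE/ASM_DIR rules spelled out in the code's comments. A compact sketch of that decision table, with the usual MII advertisement bit positions assumed and the final fallback simplified:

#include <stdint.h>

#define ADV_PAUSE       (1u << 10)      /* symmetric pause (assumed bit position)  */
#define ADV_ASM_DIR     (1u << 11)      /* asymmetric pause direction (assumed)    */

enum pause_mode { PAUSE_NONE, PAUSE_RX, PAUSE_TX, PAUSE_FULL };

static enum pause_mode resolve_pause(uint16_t local, uint16_t partner,
                                     enum pause_mode requested)
{
        if ((local & ADV_PAUSE) && (partner & ADV_PAUSE))
                return requested == PAUSE_FULL ? PAUSE_FULL : PAUSE_RX;

        if (!(local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
            (partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
                return PAUSE_TX;        /* we only honour the partner's XOFFs */

        if ((local & ADV_PAUSE) && (local & ADV_ASM_DIR) &&
            !(partner & ADV_PAUSE) && (partner & ADV_ASM_DIR))
                return PAUSE_RX;        /* the partner only honours ours */

        /* The driver additionally falls back to Rx pause here unless the
         * requested mode or strict-IEEE operation rules it out. */
        return PAUSE_NONE;
}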
@@ -1116,7 +1117,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw) } if (i == AUTO_READ_DONE_TIMEOUT) { - hw_dbg("Auto read by HW from NVM has not completed.\n"); + hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); ret_val = -E1000_ERR_RESET; goto out; } @@ -1126,7 +1127,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw) } /** - * igb_valid_led_default - Verify a valid default LED config + * e1000_valid_led_default - Verify a valid default LED config * @hw: pointer to the HW structure * @data: pointer to the NVM (EEPROM) * @@ -1139,7 +1140,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } @@ -1151,7 +1152,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) } /** - * igb_id_led_init - + * e1000_id_led_init - * @hw: pointer to the HW structure * **/ @@ -1216,7 +1217,7 @@ s32 igb_id_led_init(struct e1000_hw *hw) } /** - * igb_cleanup_led - Set LED config to default operation + * e1000_cleanup_led - Set LED config to default operation * @hw: pointer to the HW structure * * Remove the current LED configuration and set the LED configuration @@ -1229,7 +1230,7 @@ s32 igb_cleanup_led(struct e1000_hw *hw) } /** - * igb_blink_led - Blink LED + * e1000_blink_led - Blink LED * @hw: pointer to the HW structure * * Blink the led's which are set to be on. @@ -1262,7 +1263,7 @@ s32 igb_blink_led(struct e1000_hw *hw) } /** - * igb_led_off - Turn LED off + * e1000_led_off - Turn LED off * @hw: pointer to the HW structure * * Turn LED off. @@ -1289,7 +1290,7 @@ s32 igb_led_off(struct e1000_hw *hw) } /** - * igb_disable_pcie_master - Disables PCI-express master access + * e1000_disable_pcie_master - Disables PCI-express master access * @hw: pointer to the HW structure * * Returns 0 (0) if successful, else returns -10 @@ -1321,7 +1322,7 @@ s32 igb_disable_pcie_master(struct e1000_hw *hw) } if (!timeout) { - hw_dbg("Master requests are pending.\n"); + hw_dbg(hw, "Master requests are pending.\n"); ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; goto out; } @@ -1331,7 +1332,7 @@ s32 igb_disable_pcie_master(struct e1000_hw *hw) } /** - * igb_reset_adaptive - Reset Adaptive Interframe Spacing + * e1000_reset_adaptive - Reset Adaptive Interframe Spacing * @hw: pointer to the HW structure * * Reset the Adaptive Interframe Spacing throttle to default values. 
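igb_get_hw_semaphore(), igb_get_auto_rd_done() and igb_disable_pcie_master() in the surrounding hunks all follow the same poll-until-state-or-timeout idiom. A user-space sketch of that idiom, with a callback standing in for rd32() and placeholder retry counts and delays:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Poll a status register until the requested bit(s) are set, giving up
 * after a bounded number of tries.  read_reg() models rd32(). */
static bool wait_for_bit(uint32_t (*read_reg)(void), uint32_t mask,
                         unsigned int tries, unsigned int delay_us)
{
        while (tries--) {
                if (read_reg() & mask)
                        return true;    /* condition met in time */
                usleep(delay_us);       /* udelay()/msleep() in the driver */
        }
        return false;                   /* caller reports an -E1000_ERR_* code */
}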
@@ -1341,7 +1342,7 @@ void igb_reset_adaptive(struct e1000_hw *hw) struct e1000_mac_info *mac = &hw->mac; if (!mac->adaptive_ifs) { - hw_dbg("Not in Adaptive IFS mode!\n"); + hw_dbg(hw, "Not in Adaptive IFS mode!\n"); goto out; } @@ -1360,7 +1361,7 @@ void igb_reset_adaptive(struct e1000_hw *hw) } /** - * igb_update_adaptive - Update Adaptive Interframe Spacing + * e1000_update_adaptive - Update Adaptive Interframe Spacing * @hw: pointer to the HW structure * * Update the Adaptive Interframe Spacing Throttle value based on the @@ -1371,7 +1372,7 @@ void igb_update_adaptive(struct e1000_hw *hw) struct e1000_mac_info *mac = &hw->mac; if (!mac->adaptive_ifs) { - hw_dbg("Not in Adaptive IFS mode!\n"); + hw_dbg(hw, "Not in Adaptive IFS mode!\n"); goto out; } @@ -1401,7 +1402,7 @@ void igb_update_adaptive(struct e1000_hw *hw) } /** - * igb_validate_mdi_setting - Verify MDI/MDIx settings + * e1000_validate_mdi_setting - Verify MDI/MDIx settings * @hw: pointer to the HW structure * * Verify that when not using auto-negotitation that MDI/MDIx is correctly @@ -1412,7 +1413,7 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw) s32 ret_val = 0; if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { - hw_dbg("Invalid MDI setting detected\n"); + hw_dbg(hw, "Invalid MDI setting detected\n"); hw->phy.mdix = 1; ret_val = -E1000_ERR_CONFIG; goto out; @@ -1423,7 +1424,7 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw) } /** - * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * e1000_write_8bit_ctrl_reg - Write a 8bit CTRL register * @hw: pointer to the HW structure * @reg: 32bit register offset such as E1000_SCTL * @offset: register offset to write to @@ -1451,7 +1452,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, break; } if (!(regvalue & E1000_GEN_CTL_READY)) { - hw_dbg("Reg %08x did not indicate ready\n", reg); + hw_dbg(hw, "Reg %08x did not indicate ready\n", reg); ret_val = -E1000_ERR_PHY; goto out; } @@ -1461,7 +1462,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, } /** - * igb_enable_mng_pass_thru - Enable processing of ARP's + * e1000_enable_mng_pass_thru - Enable processing of ARP's * @hw: pointer to the HW structure * * Verifies the hardware needs to allow ARPs to be processed by the host. 
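igb_update_adaptive() above adjusts the Adaptive Interframe Spacing throttle: when transmissions are accompanied by collisions the inter-frame gap is widened and written to the AIT register, and the throttle is reset once traffic quiets down. The sketch below captures only that general shape; the thresholds, step handling and field names are placeholders, not the driver's values.

#include <stdint.h>

struct ifs_state {
        uint16_t current;               /* value programmed into the AIT register */
        uint16_t min_val, max_val, step;
        int in_ifs_mode;
};

static uint16_t update_ifs(struct ifs_state *s, uint32_t tx_delta, uint32_t col_delta)
{
        if (tx_delta > 256 && col_delta) {              /* busy link with collisions */
                s->in_ifs_mode = 1;
                if (s->current < s->max_val)
                        s->current = s->current ? s->current + s->step : s->min_val;
        } else if (s->in_ifs_mode && tx_delta <= 256) {
                s->current = 0;                         /* quiet again: stop throttling */
                s->in_ifs_mode = 0;
        }
        return s->current;                              /* caller writes it to the MAC */
}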
diff --git a/trunk/drivers/net/igb/e1000_nvm.c b/trunk/drivers/net/igb/e1000_nvm.c index a84e4e429fa7..2897106fee92 100644 --- a/trunk/drivers/net/igb/e1000_nvm.c +++ b/trunk/drivers/net/igb/e1000_nvm.c @@ -32,7 +32,7 @@ #include "e1000_nvm.h" /** - * igb_raise_eec_clk - Raise EEPROM clock + * e1000_raise_eec_clk - Raise EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * @@ -47,7 +47,7 @@ static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) } /** - * igb_lower_eec_clk - Lower EEPROM clock + * e1000_lower_eec_clk - Lower EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * @@ -62,7 +62,7 @@ static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) } /** - * igb_shift_out_eec_bits - Shift data bits our to the EEPROM + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM * @hw: pointer to the HW structure * @data: data to send to the EEPROM * @count: number of bits to shift out @@ -105,7 +105,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) } /** - * igb_shift_in_eec_bits - Shift data bits in from the EEPROM + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM * @hw: pointer to the HW structure * @count: number of bits to shift in * @@ -143,7 +143,7 @@ static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) } /** - * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion * @hw: pointer to the HW structure * @ee_reg: EEPROM flag for polling * @@ -174,7 +174,7 @@ static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) } /** - * igb_acquire_nvm - Generic request for access to EEPROM + * e1000_acquire_nvm - Generic request for access to EEPROM * @hw: pointer to the HW structure * * Set the EEPROM access request bit and wait for EEPROM access grant bit. @@ -202,7 +202,7 @@ s32 igb_acquire_nvm(struct e1000_hw *hw) if (!timeout) { eecd &= ~E1000_EECD_REQ; wr32(E1000_EECD, eecd); - hw_dbg("Could not acquire NVM grant\n"); + hw_dbg(hw, "Could not acquire NVM grant\n"); ret_val = -E1000_ERR_NVM; } @@ -210,7 +210,7 @@ s32 igb_acquire_nvm(struct e1000_hw *hw) } /** - * igb_standby_nvm - Return EEPROM to standby state + * e1000_standby_nvm - Return EEPROM to standby state * @hw: pointer to the HW structure * * Return the EEPROM to a standby state. @@ -273,7 +273,7 @@ static void e1000_stop_nvm(struct e1000_hw *hw) } /** - * igb_release_nvm - Release exclusive access to EEPROM + * e1000_release_nvm - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit. @@ -290,7 +290,7 @@ void igb_release_nvm(struct e1000_hw *hw) } /** - * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write * @hw: pointer to the HW structure * * Setups the EEPROM for reading and writing. 
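igb_raise_eec_clk(), igb_lower_eec_clk() and igb_shift_out_eec_bits() above bit-bang the serial EEPROM: present one data bit on DI, pulse SK, repeat MSB-first. A sketch over a shadow copy of EECD; the SK/DI bit positions are assumptions for illustration, and count must be between 1 and 16.

#include <stdint.h>

#define EECD_SK (1u << 0)       /* serial clock (assumed position)     */
#define EECD_DI (1u << 2)       /* data into the EEPROM (assumed)      */

static void shift_out_bits(uint32_t *eecd, uint16_t data, unsigned int count)
{
        uint32_t mask = 1u << (count - 1);

        do {
                *eecd &= ~EECD_DI;
                if (data & mask)
                        *eecd |= EECD_DI;       /* present the current bit            */
                *eecd |= EECD_SK;               /* rising edge clocks it in ...       */
                *eecd &= ~EECD_SK;              /* ... then return the clock low      */
                mask >>= 1;
        } while (mask);
}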
@@ -337,7 +337,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) } if (!timeout) { - hw_dbg("SPI NVM Status error\n"); + hw_dbg(hw, "SPI NVM Status error\n"); ret_val = -E1000_ERR_NVM; goto out; } @@ -348,7 +348,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) } /** - * igb_read_nvm_eerd - Reads EEPROM using EERD register + * e1000_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read * @words: number of words to read @@ -368,7 +368,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } @@ -391,7 +391,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) } /** - * igb_write_nvm_spi - Write to EEPROM using SPI + * e1000_write_nvm_spi - Write to EEPROM using SPI * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write @@ -414,7 +414,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { - hw_dbg("nvm parameter(s) out of bounds\n"); + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } @@ -475,7 +475,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) } /** - * igb_read_part_num - Read device part number + * e1000_read_part_num - Read device part number * @hw: pointer to the HW structure * @part_num: pointer to device part number * @@ -489,14 +489,14 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num) ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } *part_num = (u32)(nvm_data << 16); ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } *part_num |= nvm_data; @@ -506,7 +506,7 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num) } /** - * igb_read_mac_addr - Read device MAC address + * e1000_read_mac_addr - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. 
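igb_read_mac_addr(), whose body appears in the next hunk, reads the address as three 16-bit NVM words and unpacks each word low byte first into perm_addr. The same unpacking in isolation:

#include <stdint.h>

static void unpack_mac(const uint16_t nvm_word[3], uint8_t mac[6])
{
        for (int i = 0; i < 6; i += 2) {
                mac[i]     = (uint8_t)(nvm_word[i / 2] & 0xFF); /* low byte first  */
                mac[i + 1] = (uint8_t)(nvm_word[i / 2] >> 8);   /* then high byte  */
        }
}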
@@ -522,7 +522,7 @@ s32 igb_read_mac_addr(struct e1000_hw *hw) offset = i >> 1; ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); @@ -541,7 +541,7 @@ s32 igb_read_mac_addr(struct e1000_hw *hw) } /** - * igb_validate_nvm_checksum - Validate EEPROM checksum + * e1000_validate_nvm_checksum - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM @@ -556,14 +556,14 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw) for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error\n"); + hw_dbg(hw, "NVM Read Error\n"); goto out; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { - hw_dbg("NVM Checksum Invalid\n"); + hw_dbg(hw, "NVM Checksum Invalid\n"); ret_val = -E1000_ERR_NVM; goto out; } @@ -573,7 +573,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw) } /** - * igb_update_nvm_checksum - Update EEPROM checksum + * e1000_update_nvm_checksum - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM checksum by reading/adding each word of the EEPROM @@ -589,7 +589,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw) for (i = 0; i < NVM_CHECKSUM_REG; i++) { ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); if (ret_val) { - hw_dbg("NVM Read Error while updating checksum.\n"); + hw_dbg(hw, "NVM Read Error while updating checksum.\n"); goto out; } checksum += nvm_data; @@ -597,7 +597,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw) checksum = (u16) NVM_SUM - checksum; ret_val = hw->nvm.ops.write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); if (ret_val) - hw_dbg("NVM Write Error while updating checksum.\n"); + hw_dbg(hw, "NVM Write Error while updating checksum.\n"); out: return ret_val; diff --git a/trunk/drivers/net/igb/e1000_phy.c b/trunk/drivers/net/igb/e1000_phy.c index 17fddb91c9f5..08a86b107229 100644 --- a/trunk/drivers/net/igb/e1000_phy.c +++ b/trunk/drivers/net/igb/e1000_phy.c @@ -61,7 +61,7 @@ static const u16 e1000_igp_2_cable_length_table[] = sizeof(e1000_igp_2_cable_length_table[0])) /** - * igb_check_reset_block - Check if PHY reset is blocked + * e1000_check_reset_block - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Read the PHY management control register and check whether a PHY reset @@ -79,7 +79,7 @@ s32 igb_check_reset_block(struct e1000_hw *hw) } /** - * igb_get_phy_id - Retrieve the PHY ID and revision + * e1000_get_phy_id - Retrieve the PHY ID and revision * @hw: pointer to the HW structure * * Reads the PHY registers and stores the PHY ID and possibly the PHY @@ -109,7 +109,7 @@ s32 igb_get_phy_id(struct e1000_hw *hw) } /** - * igb_phy_reset_dsp - Reset PHY DSP + * e1000_phy_reset_dsp - Reset PHY DSP * @hw: pointer to the HW structure * * Reset the digital signal processor. 
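Back in e1000_nvm.c, igb_validate_nvm_checksum() above sums every word up to and including the checksum word and expects a fixed constant. A standalone version over an in-memory EEPROM image; 0xBABA and the 0x40-word window are the conventional e1000 values, assumed here rather than taken from this patch.

#include <stdbool.h>
#include <stdint.h>

#define NVM_CHECKSUM_REG 0x3F           /* index of the checksum word (assumed) */
#define NVM_SUM          0xBABA         /* expected 16-bit sum (assumed)        */

static bool nvm_checksum_ok(const uint16_t *eeprom)
{
        uint16_t sum = 0;

        for (unsigned int i = 0; i <= NVM_CHECKSUM_REG; i++)
                sum += eeprom[i];       /* 16-bit wraparound is intentional */
        return sum == (uint16_t)NVM_SUM;
}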
@@ -129,7 +129,7 @@ static s32 igb_phy_reset_dsp(struct e1000_hw *hw) } /** - * igb_read_phy_reg_mdic - Read MDI control register + * e1000_read_phy_reg_mdic - Read MDI control register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data @@ -144,7 +144,7 @@ static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) s32 ret_val = 0; if (offset > MAX_PHY_REG_ADDRESS) { - hw_dbg("PHY Address %d is out of range\n", offset); + hw_dbg(hw, "PHY Address %d is out of range\n", offset); ret_val = -E1000_ERR_PARAM; goto out; } @@ -172,12 +172,12 @@ static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) break; } if (!(mdic & E1000_MDIC_READY)) { - hw_dbg("MDI Read did not complete\n"); + hw_dbg(hw, "MDI Read did not complete\n"); ret_val = -E1000_ERR_PHY; goto out; } if (mdic & E1000_MDIC_ERROR) { - hw_dbg("MDI Error\n"); + hw_dbg(hw, "MDI Error\n"); ret_val = -E1000_ERR_PHY; goto out; } @@ -188,7 +188,7 @@ static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) } /** - * igb_write_phy_reg_mdic - Write MDI control register + * e1000_write_phy_reg_mdic - Write MDI control register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write to register at offset @@ -202,7 +202,7 @@ static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) s32 ret_val = 0; if (offset > MAX_PHY_REG_ADDRESS) { - hw_dbg("PHY Address %d is out of range\n", offset); + hw_dbg(hw, "PHY Address %d is out of range\n", offset); ret_val = -E1000_ERR_PARAM; goto out; } @@ -231,12 +231,12 @@ static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) break; } if (!(mdic & E1000_MDIC_READY)) { - hw_dbg("MDI Write did not complete\n"); + hw_dbg(hw, "MDI Write did not complete\n"); ret_val = -E1000_ERR_PHY; goto out; } if (mdic & E1000_MDIC_ERROR) { - hw_dbg("MDI Error\n"); + hw_dbg(hw, "MDI Error\n"); ret_val = -E1000_ERR_PHY; goto out; } @@ -246,7 +246,7 @@ static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) } /** - * igb_read_phy_reg_igp - Read igp PHY register + * e1000_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data @@ -284,7 +284,7 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) } /** - * igb_write_phy_reg_igp - Write igp PHY register + * e1000_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset @@ -321,7 +321,7 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) } /** - * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock @@ -423,7 +423,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) /* Commit the changes. 
*/ ret_val = igb_phy_sw_reset(hw); if (ret_val) { - hw_dbg("Error committing the PHY changes\n"); + hw_dbg(hw, "Error committing the PHY changes\n"); goto out; } @@ -432,7 +432,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw) } /** - * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link * @hw: pointer to the HW structure * * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for @@ -451,7 +451,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) ret_val = hw->phy.ops.reset_phy(hw); if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); + hw_dbg(hw, "Error resetting the PHY.\n"); goto out; } @@ -467,7 +467,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) if (hw->phy.ops.set_d3_lplu_state) ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); if (ret_val) { - hw_dbg("Error Disabling LPLU D3\n"); + hw_dbg(hw, "Error Disabling LPLU D3\n"); goto out; } } @@ -475,7 +475,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) /* disable lplu d0 during driver init */ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); if (ret_val) { - hw_dbg("Error Disabling LPLU D0\n"); + hw_dbg(hw, "Error Disabling LPLU D0\n"); goto out; } /* Configure mdi-mdix settings */ @@ -570,7 +570,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw) } /** - * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link * @hw: pointer to the HW structure * * Performs initial bounds checking on autoneg advertisement parameter, then @@ -597,13 +597,13 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw) if (phy->autoneg_advertised == 0) phy->autoneg_advertised = phy->autoneg_mask; - hw_dbg("Reconfiguring auto-neg advertisement params\n"); + hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); ret_val = igb_phy_setup_autoneg(hw); if (ret_val) { - hw_dbg("Error Setting up Auto-Negotiation\n"); + hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); goto out; } - hw_dbg("Restarting Auto-Neg\n"); + hw_dbg(hw, "Restarting Auto-Neg\n"); /* * Restart auto-negotiation by setting the Auto Neg Enable bit and @@ -625,8 +625,8 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw) if (phy->autoneg_wait_to_complete) { ret_val = igb_wait_autoneg(hw); if (ret_val) { - hw_dbg("Error while waiting for " - "autoneg to complete\n"); + hw_dbg(hw, "Error while waiting for " + "autoneg to complete\n"); goto out; } } @@ -638,7 +638,7 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw) } /** - * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation * @hw: pointer to the HW structure * * Reads the MII auto-neg advertisement register and/or the 1000T control @@ -689,39 +689,39 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) NWAY_AR_10T_HD_CAPS); mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); - hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); /* Do we want to advertise 10 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_HALF) { - hw_dbg("Advertise 10mb Half duplex\n"); + hw_dbg(hw, "Advertise 10mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; } /* Do we want to advertise 10 Mb Full Duplex? 
*/ if (phy->autoneg_advertised & ADVERTISE_10_FULL) { - hw_dbg("Advertise 10mb Full duplex\n"); + hw_dbg(hw, "Advertise 10mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; } /* Do we want to advertise 100 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_HALF) { - hw_dbg("Advertise 100mb Half duplex\n"); + hw_dbg(hw, "Advertise 100mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; } /* Do we want to advertise 100 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_FULL) { - hw_dbg("Advertise 100mb Full duplex\n"); + hw_dbg(hw, "Advertise 100mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; } /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ if (phy->autoneg_advertised & ADVERTISE_1000_HALF) - hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); /* Do we want to advertise 1000 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { - hw_dbg("Advertise 1000mb Full duplex\n"); + hw_dbg(hw, "Advertise 1000mb Full duplex\n"); mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; } @@ -780,7 +780,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; default: - hw_dbg("Flow control param set incorrectly\n"); + hw_dbg(hw, "Flow control param set incorrectly\n"); ret_val = -E1000_ERR_CONFIG; goto out; } @@ -790,7 +790,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) if (ret_val) goto out; - hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); if (phy->autoneg_mask & ADVERTISE_1000_FULL) { ret_val = hw->phy.ops.write_phy_reg(hw, @@ -805,7 +805,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) } /** - * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the @@ -846,12 +846,13 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) if (ret_val) goto out; - hw_dbg("IGP PSCR: %X\n", phy_data); + hw_dbg(hw, "IGP PSCR: %X\n", phy_data); udelay(1); if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + hw_dbg(hw, + "Waiting for forced speed/duplex link on IGP phy.\n"); ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, @@ -861,7 +862,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) goto out; if (!link) - hw_dbg("Link taking longer than expected.\n"); + hw_dbg(hw, "Link taking longer than expected.\n"); /* Try once more */ ret_val = igb_phy_has_link(hw, @@ -877,7 +878,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) } /** - * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. 
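The igb_phy_setup_autoneg() hunks above translate the driver's autoneg_advertised bitmap into MII advertisement bits — 10/100 modes in the auto-negotiation advertisement register, 1000 full duplex in the 1000T control register — and never advertise 1000 half duplex. A condensed sketch; the ADV_* masks and register bit values are stand-ins, not the driver's constants.

#include <stdint.h>

#define ADV_10_HALF   0x0001
#define ADV_10_FULL   0x0002
#define ADV_100_HALF  0x0004
#define ADV_100_FULL  0x0008
#define ADV_1000_FULL 0x0020

#define NWAY_10T_HD   (1u << 5)
#define NWAY_10T_FD   (1u << 6)
#define NWAY_100TX_HD (1u << 7)
#define NWAY_100TX_FD (1u << 8)
#define CR_1000T_FD   (1u << 9)

static void build_adv(uint32_t advertised, uint16_t *mii_adv, uint16_t *ctrl1000)
{
        *mii_adv &= ~(NWAY_10T_HD | NWAY_10T_FD | NWAY_100TX_HD | NWAY_100TX_FD);
        *ctrl1000 &= ~CR_1000T_FD;

        if (advertised & ADV_10_HALF)   *mii_adv |= NWAY_10T_HD;
        if (advertised & ADV_10_FULL)   *mii_adv |= NWAY_10T_FD;
        if (advertised & ADV_100_HALF)  *mii_adv |= NWAY_100TX_HD;
        if (advertised & ADV_100_FULL)  *mii_adv |= NWAY_100TX_FD;
        if (advertised & ADV_1000_FULL) *ctrl1000 |= CR_1000T_FD;
        /* 1000 Mb half duplex is never advertised, matching the hunk above. */
}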
Clears the @@ -908,7 +909,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) if (ret_val) goto out; - hw_dbg("M88E1000 PSCR: %X\n", phy_data); + hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) @@ -926,7 +927,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) udelay(1); if (phy->autoneg_wait_to_complete) { - hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + hw_dbg(hw, + "Waiting for forced speed/duplex link on M88 phy.\n"); ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, @@ -991,7 +993,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) } /** - * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex * @hw: pointer to the HW structure * @phy_ctrl: pointer to current value of PHY_CONTROL * @@ -1026,11 +1028,11 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { ctrl &= ~E1000_CTRL_FD; *phy_ctrl &= ~MII_CR_FULL_DUPLEX; - hw_dbg("Half Duplex\n"); + hw_dbg(hw, "Half Duplex\n"); } else { ctrl |= E1000_CTRL_FD; *phy_ctrl |= MII_CR_FULL_DUPLEX; - hw_dbg("Full Duplex\n"); + hw_dbg(hw, "Full Duplex\n"); } /* Forcing 10mb or 100mb? */ @@ -1038,12 +1040,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, ctrl |= E1000_CTRL_SPD_100; *phy_ctrl |= MII_CR_SPEED_100; *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); - hw_dbg("Forcing 100mb\n"); + hw_dbg(hw, "Forcing 100mb\n"); } else { ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); *phy_ctrl |= MII_CR_SPEED_10; *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); - hw_dbg("Forcing 10mb\n"); + hw_dbg(hw, "Forcing 10mb\n"); } igb_config_collision_dist(hw); @@ -1052,7 +1054,7 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, } /** - * igb_set_d3_lplu_state - Sets low power link up state for D3 + * e1000_set_d3_lplu_state - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * @@ -1144,7 +1146,7 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) } /** - * igb_check_downshift - Checks whether a downshift in speed occured + * e1000_check_downshift - Checks whether a downshift in speed occured * @hw: pointer to the HW structure * * Success returns 0, Failure returns 1 @@ -1186,7 +1188,7 @@ s32 igb_check_downshift(struct e1000_hw *hw) } /** - * igb_check_polarity_m88 - Checks the polarity. + * e1000_check_polarity_m88 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) @@ -1210,7 +1212,7 @@ static s32 igb_check_polarity_m88(struct e1000_hw *hw) } /** - * igb_check_polarity_igp - Checks the polarity. + * e1000_check_polarity_igp - Checks the polarity. 
* @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) @@ -1258,7 +1260,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw) } /** - * igb_wait_autoneg - Wait for auto-neg compeletion + * e1000_wait_autoneg - Wait for auto-neg compeletion * @hw: pointer to the HW structure * * Waits for auto-negotiation to complete or for the auto-negotiation time @@ -1290,7 +1292,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw) } /** - * igb_phy_has_link - Polls PHY for link + * e1000_phy_has_link - Polls PHY for link * @hw: pointer to the HW structure * @iterations: number of times to poll for link * @usec_interval: delay between polling attempts @@ -1330,7 +1332,7 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, } /** - * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY * @hw: pointer to the HW structure * * Reads the PHY specific status register to retrieve the cable length @@ -1367,7 +1369,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw) } /** - * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY * @hw: pointer to the HW structure * * The automatic gain control (agc) normalizes the amplitude of the @@ -1440,7 +1442,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) } /** - * igb_get_phy_info_m88 - Retrieve PHY information + * e1000_get_phy_info_m88 - Retrieve PHY information * @hw: pointer to the HW structure * * Valid for only copper links. Read the PHY status register (sticky read) @@ -1457,7 +1459,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw) bool link; if (hw->phy.media_type != e1000_media_type_copper) { - hw_dbg("Phy info is only valid for copper media\n"); + hw_dbg(hw, "Phy info is only valid for copper media\n"); ret_val = -E1000_ERR_CONFIG; goto out; } @@ -1467,7 +1469,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw) goto out; if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); + hw_dbg(hw, "Phy info is only valid if link is up\n"); ret_val = -E1000_ERR_CONFIG; goto out; } @@ -1521,7 +1523,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw) } /** - * igb_get_phy_info_igp - Retrieve igp PHY information + * e1000_get_phy_info_igp - Retrieve igp PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then @@ -1541,7 +1543,7 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw) goto out; if (!link) { - hw_dbg("Phy info is only valid if link is up\n"); + hw_dbg(hw, "Phy info is only valid if link is up\n"); ret_val = -E1000_ERR_CONFIG; goto out; } @@ -1588,7 +1590,7 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw) } /** - * igb_phy_sw_reset - PHY software reset + * e1000_phy_sw_reset - PHY software reset * @hw: pointer to the HW structure * * Does a software reset of the PHY by reading the PHY control register and @@ -1615,7 +1617,7 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw) } /** - * igb_phy_hw_reset - PHY hardware reset + * e1000_phy_hw_reset - PHY hardware reset * @hw: pointer to the HW structure * * Verify the reset block is not blocking us from resetting. 
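igb_phy_has_link() above polls the MII status register for the link bit; because that bit is latched low, the register is typically read twice per iteration so the second read reflects the current state — a property of the PHY status register, not something shown in this hunk. A sketch with a callback standing in for the PHY read op:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define MII_SR_LINK_STATUS (1u << 2)    /* link-up bit in the MII status register */

static bool poll_phy_link(uint16_t (*read_status)(void),
                          unsigned int iterations, unsigned int usec_interval)
{
        for (unsigned int i = 0; i < iterations; i++) {
                (void)read_status();                    /* first read clears the latch */
                if (read_status() & MII_SR_LINK_STATUS)
                        return true;
                usleep(usec_interval);
        }
        return false;
}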
Acquire @@ -1661,7 +1663,7 @@ s32 igb_phy_hw_reset(struct e1000_hw *hw) /* Internal function pointers */ /** - * igb_get_phy_cfg_done - Generic PHY configuration done + * e1000_get_phy_cfg_done - Generic PHY configuration done * @hw: pointer to the HW structure * * Return success if silicon family did not implement a family specific @@ -1676,7 +1678,7 @@ static s32 igb_get_phy_cfg_done(struct e1000_hw *hw) } /** - * igb_release_phy - Generic release PHY + * e1000_release_phy - Generic release PHY * @hw: pointer to the HW structure * * Return if silicon family does not require a semaphore when accessing the @@ -1689,7 +1691,7 @@ static void igb_release_phy(struct e1000_hw *hw) } /** - * igb_acquire_phy - Generic acquire PHY + * e1000_acquire_phy - Generic acquire PHY * @hw: pointer to the HW structure * * Return success if silicon family does not require a semaphore when @@ -1704,7 +1706,7 @@ static s32 igb_acquire_phy(struct e1000_hw *hw) } /** - * igb_phy_force_speed_duplex - Generic force PHY speed/duplex + * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex * @hw: pointer to the HW structure * * When the silicon family has not implemented a forced speed/duplex @@ -1719,14 +1721,14 @@ s32 igb_phy_force_speed_duplex(struct e1000_hw *hw) } /** - * igb_phy_init_script_igp3 - Inits the IGP3 PHY + * e1000_phy_init_script_igp3 - Inits the IGP3 PHY * @hw: pointer to the HW structure * * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. **/ s32 igb_phy_init_script_igp3(struct e1000_hw *hw) { - hw_dbg("Running IGP 3 PHY init script\n"); + hw_dbg(hw, "Running IGP 3 PHY init script\n"); /* PHY init IGP 3 */ /* Enable rise/fall, 10-mode work in class-A */ diff --git a/trunk/drivers/net/igb/igb.h b/trunk/drivers/net/igb/igb.h index 2c48eec17660..0eecb8b2abd2 100644 --- a/trunk/drivers/net/igb/igb.h +++ b/trunk/drivers/net/igb/igb.h @@ -150,7 +150,6 @@ struct igb_ring { u16 itr_register; u16 cpu; - int queue_index; unsigned int total_bytes; unsigned int total_packets; @@ -266,7 +265,6 @@ struct igb_adapter { int msg_enable; struct msix_entry *msix_entries; u32 eims_enable_mask; - u32 eims_other; /* to not mess up cache alignment, always add to the bottom */ unsigned long state; diff --git a/trunk/drivers/net/igb/igb_main.c b/trunk/drivers/net/igb/igb_main.c index 7bc6fae182a7..171d1fc1fbf8 100644 --- a/trunk/drivers/net/igb/igb_main.c +++ b/trunk/drivers/net/igb/igb_main.c @@ -71,8 +71,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *); static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); -static void igb_free_tx_resources(struct igb_ring *); -static void igb_free_rx_resources(struct igb_ring *); +static void igb_free_tx_resources(struct igb_adapter *, struct igb_ring *); +static void igb_free_rx_resources(struct igb_adapter *, struct igb_ring *); void igb_update_stats(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); @@ -84,8 +84,8 @@ static void igb_configure_rx(struct igb_adapter *); static void igb_setup_rctl(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); static void igb_clean_all_rx_rings(struct igb_adapter *); -static void igb_clean_tx_ring(struct igb_ring *); -static void igb_clean_rx_ring(struct igb_ring *); +static void igb_clean_tx_ring(struct igb_adapter *, struct igb_ring *); +static void 
igb_clean_rx_ring(struct igb_adapter *, struct igb_ring *); static void igb_set_multi(struct net_device *); static void igb_update_phy_info(unsigned long); static void igb_watchdog(unsigned long); @@ -102,10 +102,12 @@ static irqreturn_t igb_msix_other(int irq, void *); static irqreturn_t igb_msix_rx(int irq, void *); static irqreturn_t igb_msix_tx(int irq, void *); static int igb_clean_rx_ring_msix(struct napi_struct *, int); -static bool igb_clean_tx_irq(struct igb_ring *); +static bool igb_clean_tx_irq(struct igb_adapter *, struct igb_ring *); static int igb_clean(struct napi_struct *, int); -static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); -static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); +static bool igb_clean_rx_irq_adv(struct igb_adapter *, + struct igb_ring *, int *, int); +static void igb_alloc_rx_buffers_adv(struct igb_adapter *, + struct igb_ring *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); @@ -227,11 +229,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = &(adapter->rx_ring[i]); ring->adapter = adapter; - ring->queue_index = i; ring->itr_register = E1000_ITR; - /* set a default napi handler for each rx_ring */ - netif_napi_add(adapter->netdev, &ring->napi, igb_clean, 64); + if (!ring->napi.poll) + netif_napi_add(adapter->netdev, &ring->napi, igb_clean, + adapter->napi.weight / + adapter->num_rx_queues); } return 0; } @@ -299,6 +302,9 @@ static void igb_configure_msix(struct igb_adapter *adapter) array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); + /* disable IAM for ICR interrupt bits */ + wr32(E1000_IAM, 0); + tmp = rd32(E1000_CTRL_EXT); /* enable MSI-X PBA support*/ tmp |= E1000_CTRL_EXT_PBA_CLR; @@ -309,7 +315,6 @@ static void igb_configure_msix(struct igb_adapter *adapter) wr32(E1000_CTRL_EXT, tmp); adapter->eims_enable_mask |= E1000_EIMS_OTHER; - adapter->eims_other = E1000_EIMS_OTHER; wrfl(); } @@ -352,9 +357,6 @@ static int igb_request_msix(struct igb_adapter *adapter) goto out; ring->itr_register = E1000_EITR(0) + (vector << 2); ring->itr_val = adapter->itr; - /* overwrite the poll routine for MSIX, we've already done - * netif_napi_add */ - ring->napi.poll = &igb_clean_rx_ring_msix; vector++; } @@ -363,6 +365,9 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto out; + adapter->napi.poll = igb_clean_rx_ring_msix; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].napi.poll = adapter->napi.poll; igb_configure_msix(adapter); return 0; out: @@ -431,8 +436,12 @@ static int igb_request_irq(struct igb_adapter *adapter) if (adapter->msix_entries) { err = igb_request_msix(adapter); - if (!err) + if (!err) { + /* enable IAM, auto-mask, + * DO NOT USE EIAM or IAM in legacy mode */ + wr32(E1000_IAM, IMS_ENABLE_MASK); goto request_done; + } /* fall back to MSI */ igb_reset_interrupt_capability(adapter); if (!pci_enable_msi(adapter->pdev)) @@ -441,11 +450,7 @@ static int igb_request_irq(struct igb_adapter *adapter) igb_free_all_rx_resources(adapter); adapter->num_rx_queues = 1; igb_alloc_queues(adapter); - } else { - wr32(E1000_MSIXBM(0), (E1000_EICR_RX_QUEUE0 | - E1000_EIMS_OTHER)); } - if (adapter->msi_enabled) { err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, netdev->name, netdev); @@ -497,12 +502,9 @@ static void igb_irq_disable(struct igb_adapter *adapter) struct e1000_hw *hw = 
&adapter->hw; if (adapter->msix_entries) { - wr32(E1000_EIAM, 0); wr32(E1000_EIMC, ~0); wr32(E1000_EIAC, 0); } - - wr32(E1000_IAM, 0); wr32(E1000_IMC, ~0); wrfl(); synchronize_irq(adapter->pdev->irq); @@ -517,14 +519,13 @@ static void igb_irq_enable(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) { - wr32(E1000_EIAC, adapter->eims_enable_mask); - wr32(E1000_EIAM, adapter->eims_enable_mask); - wr32(E1000_EIMS, adapter->eims_enable_mask); + wr32(E1000_EIMS, + adapter->eims_enable_mask); + wr32(E1000_EIAC, + adapter->eims_enable_mask); wr32(E1000_IMS, E1000_IMS_LSC); - } else { - wr32(E1000_IMS, IMS_ENABLE_MASK); - wr32(E1000_IAM, IMS_ENABLE_MASK); - } + } else + wr32(E1000_IMS, IMS_ENABLE_MASK); } static void igb_update_mng_vlan(struct igb_adapter *adapter) @@ -631,15 +632,12 @@ static void igb_configure(struct igb_adapter *adapter) igb_configure_tx(adapter); igb_setup_rctl(adapter); igb_configure_rx(adapter); - - igb_rx_fifo_flush_82575(&adapter->hw); - /* call IGB_DESC_UNUSED which always leaves * at least 1 descriptor unused to make sure * next_to_use != next_to_clean */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = &adapter->rx_ring[i]; - igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring)); + igb_alloc_rx_buffers_adv(adapter, ring, IGB_DESC_UNUSED(ring)); } @@ -662,10 +660,13 @@ int igb_up(struct igb_adapter *adapter) clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_rx_queues; i++) - napi_enable(&adapter->rx_ring[i].napi); - if (adapter->msix_entries) + napi_enable(&adapter->napi); + + if (adapter->msix_entries) { + for (i = 0; i < adapter->num_rx_queues; i++) + napi_enable(&adapter->rx_ring[i].napi); igb_configure_msix(adapter); + } /* Clear any pending interrupts. */ rd32(E1000_ICR); @@ -702,9 +703,11 @@ void igb_down(struct igb_adapter *adapter) wrfl(); msleep(10); - for (i = 0; i < adapter->num_rx_queues; i++) - napi_disable(&adapter->rx_ring[i].napi); + napi_disable(&adapter->napi); + if (adapter->msix_entries) + for (i = 0; i < adapter->num_rx_queues; i++) + napi_disable(&adapter->rx_ring[i].napi); igb_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); @@ -851,6 +854,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, struct e1000_hw *hw; const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; unsigned long mmio_start, mmio_len; + static int cards_found; int i, err, pci_using_dac; u16 eeprom_data = 0; u16 eeprom_apme_mask = IGB_EEPROM_APME; @@ -929,6 +933,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, igb_set_ethtool_ops(netdev); netdev->tx_timeout = &igb_tx_timeout; netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, igb_clean, 64); netdev->vlan_rx_register = igb_vlan_rx_register; netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid; netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid; @@ -942,6 +947,8 @@ static int __devinit igb_probe(struct pci_dev *pdev, netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; + adapter->bd_number = cards_found; + /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; @@ -1126,6 +1133,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->msi_enabled ? 
"MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); + cards_found++; return 0; err_register: @@ -1293,14 +1301,15 @@ static int igb_open(struct net_device *netdev) /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_rx_queues; i++) - napi_enable(&adapter->rx_ring[i].napi); - - /* Clear any pending interrupts. */ - rd32(E1000_ICR); + napi_enable(&adapter->napi); + if (adapter->msix_entries) + for (i = 0; i < adapter->num_rx_queues; i++) + napi_enable(&adapter->rx_ring[i].napi); igb_irq_enable(adapter); + /* Clear any pending interrupts. */ + rd32(E1000_ICR); /* Fire a link status change interrupt to start the watchdog. */ wr32(E1000_ICS, E1000_ICS_LSC); @@ -1414,7 +1423,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) dev_err(&adapter->pdev->dev, "Allocation for Tx Queue %u failed\n", i); for (i--; i >= 0; i--) - igb_free_tx_resources(&adapter->tx_ring[i]); + igb_free_tx_resources(adapter, + &adapter->tx_ring[i]); break; } } @@ -1528,6 +1538,8 @@ int igb_setup_rx_resources(struct igb_adapter *adapter, rx_ring->pending_skb = NULL; rx_ring->adapter = adapter; + /* FIXME: do we want to setup ring->napi->poll here? */ + rx_ring->napi.poll = adapter->napi.poll; return 0; @@ -1555,7 +1567,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter) dev_err(&adapter->pdev->dev, "Allocation for Rx Queue %u failed\n", i); for (i--; i >= 0; i--) - igb_free_rx_resources(&adapter->rx_ring[i]); + igb_free_rx_resources(adapter, + &adapter->rx_ring[i]); break; } } @@ -1783,11 +1796,12 @@ static void igb_configure_rx(struct igb_adapter *adapter) * * Free all transmit software resources **/ -static void igb_free_tx_resources(struct igb_ring *tx_ring) +static void igb_free_tx_resources(struct igb_adapter *adapter, + struct igb_ring *tx_ring) { - struct pci_dev *pdev = tx_ring->adapter->pdev; + struct pci_dev *pdev = adapter->pdev; - igb_clean_tx_ring(tx_ring); + igb_clean_tx_ring(adapter, tx_ring); vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; @@ -1808,7 +1822,7 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_free_tx_resources(&adapter->tx_ring[i]); + igb_free_tx_resources(adapter, &adapter->tx_ring[i]); } static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, @@ -1834,9 +1848,9 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, * @adapter: board private structure * @tx_ring: ring to be cleaned **/ -static void igb_clean_tx_ring(struct igb_ring *tx_ring) +static void igb_clean_tx_ring(struct igb_adapter *adapter, + struct igb_ring *tx_ring) { - struct igb_adapter *adapter = tx_ring->adapter; struct igb_buffer *buffer_info; unsigned long size; unsigned int i; @@ -1873,7 +1887,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_clean_tx_ring(&adapter->tx_ring[i]); + igb_clean_tx_ring(adapter, &adapter->tx_ring[i]); } /** @@ -1883,11 +1897,12 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter) * * Free all receive software resources **/ -static void igb_free_rx_resources(struct igb_ring *rx_ring) +static void igb_free_rx_resources(struct igb_adapter *adapter, + struct igb_ring *rx_ring) { - struct pci_dev *pdev = rx_ring->adapter->pdev; + struct pci_dev *pdev = adapter->pdev; - igb_clean_rx_ring(rx_ring); + igb_clean_rx_ring(adapter, rx_ring); vfree(rx_ring->buffer_info); 
rx_ring->buffer_info = NULL; @@ -1908,7 +1923,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_free_rx_resources(&adapter->rx_ring[i]); + igb_free_rx_resources(adapter, &adapter->rx_ring[i]); } /** @@ -1916,9 +1931,9 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) * @adapter: board private structure * @rx_ring: ring to free buffers from **/ -static void igb_clean_rx_ring(struct igb_ring *rx_ring) +static void igb_clean_rx_ring(struct igb_adapter *adapter, + struct igb_ring *rx_ring) { - struct igb_adapter *adapter = rx_ring->adapter; struct igb_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; @@ -1982,7 +1997,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_ring(&adapter->rx_ring[i]); + igb_clean_rx_ring(adapter, &adapter->rx_ring[i]); } /** @@ -3026,19 +3041,26 @@ static irqreturn_t igb_msix_other(int irq, void *data) struct net_device *netdev = data; struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 icr = rd32(E1000_ICR); + u32 eicr; + /* disable interrupts from the "other" bit, avoid re-entry */ + wr32(E1000_EIMC, E1000_EIMS_OTHER); - /* reading ICR causes bit 31 of EICR to be cleared */ - if (!(icr & E1000_ICR_LSC)) - goto no_link_interrupt; - hw->mac.get_link_status = 1; - /* guard against interrupt when we're going down */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, jiffies + 1); + eicr = rd32(E1000_EICR); + + if (eicr & E1000_EIMS_OTHER) { + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } no_link_interrupt: wr32(E1000_IMS, E1000_IMS_LSC); - wr32(E1000_EIMS, adapter->eims_other); + wr32(E1000_EIMS, E1000_EIMS_OTHER); return IRQ_HANDLED; } @@ -3054,7 +3076,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data) tx_ring->total_bytes = 0; tx_ring->total_packets = 0; - if (!igb_clean_tx_irq(tx_ring)) + if (!igb_clean_tx_irq(adapter, tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ wr32(E1000_EICS, tx_ring->eims_value); @@ -3069,18 +3091,20 @@ static irqreturn_t igb_msix_rx(int irq, void *data) struct igb_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - /* Write the ITR value calculated at the end of the - * previous interrupt. 
- */ - - if (adapter->set_itr) { - wr32(rx_ring->itr_register, - 1000000000 / (rx_ring->itr_val * 256)); - adapter->set_itr = 0; - } + if (!rx_ring->itr_val) + wr32(E1000_EIMC, rx_ring->eims_value); - if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) + if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) { + rx_ring->total_bytes = 0; + rx_ring->total_packets = 0; + rx_ring->no_itr_adjust = 0; __netif_rx_schedule(adapter->netdev, &rx_ring->napi); + } else { + if (!rx_ring->no_itr_adjust) { + igb_lower_rx_eitr(adapter, rx_ring); + rx_ring->no_itr_adjust = 1; + } + } return IRQ_HANDLED; } @@ -3095,6 +3119,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) { struct net_device *netdev = data; struct igb_adapter *adapter = netdev_priv(netdev); + struct napi_struct *napi = &adapter->napi; struct e1000_hw *hw = &adapter->hw; /* read ICR disables interrupts using IAM */ u32 icr = rd32(E1000_ICR); @@ -3103,17 +3128,25 @@ static irqreturn_t igb_intr_msi(int irq, void *data) * previous interrupt. */ if (adapter->set_itr) { - wr32(E1000_ITR, 1000000000 / (adapter->itr * 256)); + wr32(E1000_ITR, + 1000000000 / (adapter->itr * 256)); adapter->set_itr = 0; } + /* read ICR disables interrupts using IAM */ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { hw->mac.get_link_status = 1; if (!test_bit(__IGB_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - netif_rx_schedule(netdev, &adapter->rx_ring[0].napi); + if (netif_rx_schedule_prep(netdev, napi)) { + adapter->tx_ring->total_bytes = 0; + adapter->tx_ring->total_packets = 0; + adapter->rx_ring->total_bytes = 0; + adapter->rx_ring->total_packets = 0; + __netif_rx_schedule(netdev, napi); + } return IRQ_HANDLED; } @@ -3127,6 +3160,7 @@ static irqreturn_t igb_intr(int irq, void *data) { struct net_device *netdev = data; struct igb_adapter *adapter = netdev_priv(netdev); + struct napi_struct *napi = &adapter->napi; struct e1000_hw *hw = &adapter->hw; /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No * need for the IMC write */ @@ -3139,7 +3173,8 @@ static irqreturn_t igb_intr(int irq, void *data) * previous interrupt. */ if (adapter->set_itr) { - wr32(E1000_ITR, 1000000000 / (adapter->itr * 256)); + wr32(E1000_ITR, + 1000000000 / (adapter->itr * 256)); adapter->set_itr = 0; } @@ -3157,7 +3192,13 @@ static irqreturn_t igb_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } - netif_rx_schedule(netdev, &adapter->rx_ring[0].napi); + if (netif_rx_schedule_prep(netdev, napi)) { + adapter->tx_ring->total_bytes = 0; + adapter->rx_ring->total_bytes = 0; + adapter->tx_ring->total_packets = 0; + adapter->rx_ring->total_packets = 0; + __netif_rx_schedule(netdev, napi); + } return IRQ_HANDLED; } @@ -3186,13 +3227,14 @@ static int igb_clean(struct napi_struct *napi, int budget) * the lock means tx_ring[i] is currently being cleaned anyway. 
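Both the MSI and legacy interrupt handlers above program E1000_ITR as 1000000000 / (adapter->itr * 256): adapter->itr is kept in interrupts per second, while the register counts the minimum gap between interrupts in 256 ns units (the usual e1000 granularity, assumed here). A quick standalone check of that conversion:

#include <stdint.h>
#include <stdio.h>

static uint32_t itr_reg_from_ints_per_sec(uint32_t ints_per_sec)
{
        return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
        uint32_t reg = itr_reg_from_ints_per_sec(8000); /* target ~8000 interrupts/s */

        /* 488 * 256 ns ~= 125 us, i.e. one interrupt roughly every 1/8000 s. */
        printf("ITR register value: %u (%.1f us between interrupts)\n",
               reg, reg * 256 / 1000.0);
        return 0;
}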
*/ for (i = 0; i < adapter->num_tx_queues; i++) { if (spin_trylock(&adapter->tx_ring[i].tx_clean_lock)) { - tx_clean_complete &= igb_clean_tx_irq(&adapter->tx_ring[i]); + tx_clean_complete &= igb_clean_tx_irq(adapter, + &adapter->tx_ring[i]); spin_unlock(&adapter->tx_ring[i].tx_clean_lock); } } for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_irq_adv(&adapter->rx_ring[i], &work_done, + igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i], &work_done, adapter->rx_ring[i].napi.weight); /* If no Tx and not enough Rx work done, exit the polling mode */ @@ -3222,7 +3264,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) if (!netif_carrier_ok(netdev)) goto quit_polling; - igb_clean_rx_irq_adv(rx_ring, &work_done, budget); + igb_clean_rx_irq_adv(adapter, rx_ring, &work_done, budget); /* If not enough Rx work done, exit the polling mode */ @@ -3240,10 +3282,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH) igb_lower_rx_eitr(adapter, rx_ring); } - - if (!test_bit(__IGB_DOWN, &adapter->state)) - wr32(E1000_EIMS, rx_ring->eims_value); - return 0; } @@ -3261,11 +3299,11 @@ static inline u32 get_head(struct igb_ring *tx_ring) * @adapter: board private structure * returns true if ring is completely cleaned **/ -static bool igb_clean_tx_irq(struct igb_ring *tx_ring) +static bool igb_clean_tx_irq(struct igb_adapter *adapter, + struct igb_ring *tx_ring) { - struct igb_adapter *adapter = tx_ring->adapter; - struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; struct e1000_tx_desc *tx_desc; struct igb_buffer *buffer_info; struct sk_buff *skb; @@ -3420,10 +3458,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, adapter->hw_csum_good++; } -static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, - int *work_done, int budget) +static bool igb_clean_rx_irq_adv(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int *work_done, int budget) { - struct igb_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_adv_rx_desc *rx_desc , *next_rxd; @@ -3546,7 +3584,8 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IGB_RX_BUFFER_WRITE) { - igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); + igb_alloc_rx_buffers_adv(adapter, rx_ring, + cleaned_count); cleaned_count = 0; } @@ -3561,7 +3600,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, cleaned_count = IGB_DESC_UNUSED(rx_ring); if (cleaned_count) - igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); + igb_alloc_rx_buffers_adv(adapter, rx_ring, cleaned_count); rx_ring->total_packets += total_packets; rx_ring->total_bytes += total_bytes; @@ -3577,10 +3616,10 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split * @adapter: address of board private structure **/ -static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, +static void igb_alloc_rx_buffers_adv(struct igb_adapter *adapter, + struct igb_ring *rx_ring, int cleaned_count) { - struct igb_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_adv_rx_desc *rx_desc; @@ -4023,10 +4062,10 @@ static void igb_netpoll(struct net_device *netdev) igb_irq_disable(adapter); for (i 
= 0; i < adapter->num_tx_queues; i++) - igb_clean_tx_irq(&adapter->tx_ring[i]); + igb_clean_tx_irq(adapter, &adapter->tx_ring[i]); for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_irq_adv(&adapter->rx_ring[i], + igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i], &work_done, adapter->rx_ring[i].napi.weight); diff --git a/trunk/drivers/net/ipg.c b/trunk/drivers/net/ipg.c index 7373dafbb3f7..2c03f4e2ccc4 100644 --- a/trunk/drivers/net/ipg.c +++ b/trunk/drivers/net/ipg.c @@ -42,6 +42,7 @@ #define ipg_r16(reg) ioread16(ioaddr + (reg)) #define ipg_r8(reg) ioread8(ioaddr + (reg)) +#define JUMBO_FRAME_4k_ONLY enum { netdev_io_size = 128 }; @@ -53,14 +54,6 @@ MODULE_AUTHOR("IC Plus Corp. 2003"); MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); MODULE_LICENSE("GPL"); -/* - * Defaults - */ -#define IPG_MAX_RXFRAME_SIZE 0x0600 -#define IPG_RXFRAG_SIZE 0x0600 -#define IPG_RXSUPPORT_SIZE 0x0600 -#define IPG_IS_JUMBO false - /* * Variable record -- index by leading revision/length * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN @@ -638,7 +631,6 @@ static void ipg_nic_set_multicast_list(struct net_device *dev) static int ipg_io_config(struct net_device *dev) { - struct ipg_nic_private *sp = netdev_priv(dev); void __iomem *ioaddr = ipg_ioaddr(dev); u32 origmacctrl; u32 restoremacctrl; @@ -678,7 +670,7 @@ static int ipg_io_config(struct net_device *dev) /* Set RECEIVEMODE register. */ ipg_nic_set_multicast_list(dev); - ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE); + ipg_w16(IPG_MAX_RXFRAME_SIZE, MAX_FRAME_SIZE); ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); @@ -738,7 +730,7 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry) IPG_DEBUG_MSG("_get_rxbuff\n"); - skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN); + skb = netdev_alloc_skb(dev, IPG_RXSUPPORT_SIZE + NET_IP_ALIGN); if (!skb) { sp->rx_buff[entry] = NULL; return -ENOMEM; @@ -759,7 +751,7 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry) sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); /* Set the RFD fragment length. */ - rxfragsize = sp->rxfrag_size; + rxfragsize = IPG_RXFRAG_SIZE; rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); return 0; @@ -1084,6 +1076,8 @@ static int ipg_nic_rxrestore(struct net_device *dev) return 0; } +#ifdef JUMBO_FRAME + /* use jumboindex and jumbosize to control jumbo frame status * initial status is jumboindex=-1 and jumbosize=0 * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. 
@@ -1103,7 +1097,7 @@ enum { FRAME_WITH_START_WITH_END = 11 }; -static void ipg_nic_rx_free_skb(struct net_device *dev) +inline void ipg_nic_rx_free_skb(struct net_device *dev) { struct ipg_nic_private *sp = netdev_priv(dev); unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; @@ -1119,7 +1113,7 @@ static void ipg_nic_rx_free_skb(struct net_device *dev) } } -static int ipg_nic_rx_check_frame_type(struct net_device *dev) +inline int ipg_nic_rx_check_frame_type(struct net_device *dev) { struct ipg_nic_private *sp = netdev_priv(dev); struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); @@ -1132,7 +1126,7 @@ static int ipg_nic_rx_check_frame_type(struct net_device *dev) return type; } -static int ipg_nic_rx_check_error(struct net_device *dev) +inline int ipg_nic_rx_check_error(struct net_device *dev) { struct ipg_nic_private *sp = netdev_priv(dev); unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; @@ -1215,8 +1209,8 @@ static void ipg_nic_rx_with_start_and_end(struct net_device *dev, /* accept this frame and send to upper layer */ framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; - if (framelen > sp->rxfrag_size) - framelen = sp->rxfrag_size; + if (framelen > IPG_RXFRAG_SIZE) + framelen = IPG_RXFRAG_SIZE; skb_put(skb, framelen); skb->protocol = eth_type_trans(skb, dev); @@ -1249,10 +1243,10 @@ static void ipg_nic_rx_with_start(struct net_device *dev, pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), sp->rx_buf_sz, PCI_DMA_FROMDEVICE); - skb_put(skb, sp->rxfrag_size); + skb_put(skb, IPG_RXFRAG_SIZE); jumbo->found_start = 1; - jumbo->current_size = sp->rxfrag_size; + jumbo->current_size = IPG_RXFRAG_SIZE; jumbo->skb = skb; sp->rx_buff[entry] = NULL; @@ -1278,7 +1272,11 @@ static void ipg_nic_rx_with_end(struct net_device *dev, framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; endframelen = framelen - jumbo->current_size; - if (framelen > sp->rxsupport_size) + /* + if (framelen > IPG_RXFRAG_SIZE) + framelen=IPG_RXFRAG_SIZE; + */ + if (framelen > IPG_RXSUPPORT_SIZE) dev_kfree_skb_irq(jumbo->skb); else { memcpy(skb_put(jumbo->skb, endframelen), @@ -1318,11 +1316,11 @@ static void ipg_nic_rx_no_start_no_end(struct net_device *dev, if (skb) { if (jumbo->found_start) { - jumbo->current_size += sp->rxfrag_size; - if (jumbo->current_size <= sp->rxsupport_size) { + jumbo->current_size += IPG_RXFRAG_SIZE; + if (jumbo->current_size <= IPG_RXSUPPORT_SIZE) { memcpy(skb_put(jumbo->skb, - sp->rxfrag_size), - skb->data, sp->rxfrag_size); + IPG_RXFRAG_SIZE), + skb->data, IPG_RXFRAG_SIZE); } } dev->last_rx = jiffies; @@ -1336,7 +1334,7 @@ static void ipg_nic_rx_no_start_no_end(struct net_device *dev, } } -static int ipg_nic_rx_jumbo(struct net_device *dev) +static int ipg_nic_rx(struct net_device *dev) { struct ipg_nic_private *sp = netdev_priv(dev); unsigned int curr = sp->rx_current; @@ -1384,6 +1382,7 @@ static int ipg_nic_rx_jumbo(struct net_device *dev) return 0; } +#else static int ipg_nic_rx(struct net_device *dev) { /* Transfer received Ethernet frames to higher network layers. */ @@ -1414,11 +1413,11 @@ static int ipg_nic_rx(struct net_device *dev) /* Check for jumbo frame arrival with too small * RXFRAG_SIZE. 
*/ - if (framelen > sp->rxfrag_size) { + if (framelen > IPG_RXFRAG_SIZE) { IPG_DEBUG_MSG ("RFS FrameLen > allocated fragment size.\n"); - framelen = sp->rxfrag_size; + framelen = IPG_RXFRAG_SIZE; } if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & @@ -1557,6 +1556,7 @@ static int ipg_nic_rx(struct net_device *dev) return 0; } +#endif static void ipg_reset_after_host_error(struct work_struct *work) { @@ -1592,9 +1592,9 @@ static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) IPG_DEBUG_MSG("_interrupt_handler\n"); - if (sp->is_jumbo) - ipg_nic_rxrestore(dev); - +#ifdef JUMBO_FRAME + ipg_nic_rxrestore(dev); +#endif spin_lock(&sp->lock); /* Get interrupt source information, and acknowledge @@ -1650,10 +1650,7 @@ static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) sp->RFDListCheckedCount++; #endif - if (sp->is_jumbo) - ipg_nic_rx_jumbo(dev); - else - ipg_nic_rx(dev); + ipg_nic_rx(dev); } /* If TxDMAComplete interrupt, free used TFDs. */ @@ -1752,7 +1749,7 @@ static int ipg_nic_open(struct net_device *dev) IPG_DEBUG_MSG("_nic_open\n"); - sp->rx_buf_sz = sp->rxsupport_size; + sp->rx_buf_sz = IPG_RXSUPPORT_SIZE; /* Check for interrupt line conflicts, and request interrupt * line for IPG. @@ -1807,10 +1804,13 @@ static int ipg_nic_open(struct net_device *dev) if (ipg_config_autoneg(dev) < 0) printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name); +#ifdef JUMBO_FRAME /* initialize JUMBO Frame control variable */ sp->jumbo.found_start = 0; sp->jumbo.current_size = 0; sp->jumbo.skb = NULL; + dev->mtu = IPG_TXFRAG_SIZE; +#endif /* Enable transmit and receive operation of the IPG. */ ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & @@ -2119,9 +2119,6 @@ static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) { - struct ipg_nic_private *sp = netdev_priv(dev); - int err; - /* Function to accomodate changes to Maximum Transfer Unit * (or MTU) of IPG NIC. Cannot use default function since * the default will not allow for MTU > 1500 bytes. @@ -2129,33 +2126,16 @@ static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) IPG_DEBUG_MSG("_nic_change_mtu\n"); - /* - * Check that the new MTU value is between 68 (14 byte header, 46 byte - * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU. + /* Check that the new MTU value is between 68 (14 byte header, 46 + * byte payload, 4 byte FCS) and IPG_MAX_RXFRAME_SIZE, which + * corresponds to the MAXFRAMESIZE register in the IPG. */ - if (new_mtu < 68 || new_mtu > 10240) + if ((new_mtu < 68) || (new_mtu > IPG_MAX_RXFRAME_SIZE)) return -EINVAL; - err = ipg_nic_stop(dev); - if (err) - return err; - dev->mtu = new_mtu; - sp->max_rxframe_size = new_mtu; - - sp->rxfrag_size = new_mtu; - if (sp->rxfrag_size > 4088) - sp->rxfrag_size = 4088; - - sp->rxsupport_size = sp->max_rxframe_size; - - if (new_mtu > 0x0600) - sp->is_jumbo = true; - else - sp->is_jumbo = false; - - return ipg_nic_open(dev); + return 0; } static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -2260,11 +2240,6 @@ static int __devinit ipg_probe(struct pci_dev *pdev, spin_lock_init(&sp->lock); mutex_init(&sp->mii_mutex); - sp->is_jumbo = IPG_IS_JUMBO; - sp->rxfrag_size = IPG_RXFRAG_SIZE; - sp->rxsupport_size = IPG_RXSUPPORT_SIZE; - sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE; - /* Declare IPG NIC functions for Ethernet device methods. 
*/ dev->open = &ipg_nic_open; diff --git a/trunk/drivers/net/ipg.h b/trunk/drivers/net/ipg.h index e0e718ab4c2e..cda53887d4db 100644 --- a/trunk/drivers/net/ipg.h +++ b/trunk/drivers/net/ipg.h @@ -536,6 +536,83 @@ enum ipg_regs { */ #define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 +#ifdef JUMBO_FRAME + +# ifdef JUMBO_FRAME_SIZE_2K +# define JUMBO_FRAME_SIZE 2048 +# define __IPG_RXFRAG_SIZE 2048 +# else +# ifdef JUMBO_FRAME_SIZE_3K +# define JUMBO_FRAME_SIZE 3072 +# define __IPG_RXFRAG_SIZE 3072 +# else +# ifdef JUMBO_FRAME_SIZE_4K +# define JUMBO_FRAME_SIZE 4096 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_5K +# define JUMBO_FRAME_SIZE 5120 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_6K +# define JUMBO_FRAME_SIZE 6144 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_7K +# define JUMBO_FRAME_SIZE 7168 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_8K +# define JUMBO_FRAME_SIZE 8192 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_9K +# define JUMBO_FRAME_SIZE 9216 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_10K +# define JUMBO_FRAME_SIZE 10240 +# define __IPG_RXFRAG_SIZE 4088 +# else +# define JUMBO_FRAME_SIZE 4096 +# endif +# endif +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif + +/* Size of allocated received buffers. Nominally 0x0600. + * Define larger if expecting jumbo frames. + */ +#ifdef JUMBO_FRAME +/* IPG_TXFRAG_SIZE must <= 0x2b00, or TX will crash */ +#define IPG_TXFRAG_SIZE JUMBO_FRAME_SIZE +#endif + +/* Size of allocated received buffers. Nominally 0x0600. + * Define larger if expecting jumbo frames. + */ +#ifdef JUMBO_FRAME +/* 4088 = 4096 - 8 */ +#define IPG_RXFRAG_SIZE __IPG_RXFRAG_SIZE +#define IPG_RXSUPPORT_SIZE IPG_MAX_RXFRAME_SIZE +#else +#define IPG_RXFRAG_SIZE 0x0600 +#define IPG_RXSUPPORT_SIZE IPG_RXFRAG_SIZE +#endif + +/* IPG_MAX_RXFRAME_SIZE <= IPG_RXFRAG_SIZE */ +#ifdef JUMBO_FRAME +#define IPG_MAX_RXFRAME_SIZE JUMBO_FRAME_SIZE +#else +#define IPG_MAX_RXFRAME_SIZE 0x0600 +#endif + #define IPG_RFDLIST_LENGTH 0x100 /* Maximum number of RFDs to process per interrupt. @@ -709,11 +786,9 @@ struct ipg_nic_private { unsigned int tx_dirty; unsigned int rx_current; unsigned int rx_dirty; - bool is_jumbo; +#ifdef JUMBO_FRAME struct ipg_jumbo jumbo; - unsigned long rxfrag_size; - unsigned long rxsupport_size; - unsigned long max_rxframe_size; +#endif unsigned int rx_buf_sz; struct pci_dev *pdev; struct net_device *dev; diff --git a/trunk/drivers/net/ne.c b/trunk/drivers/net/ne.c index 14126973bd12..874d291cbaed 100644 --- a/trunk/drivers/net/ne.c +++ b/trunk/drivers/net/ne.c @@ -217,7 +217,7 @@ static int __init do_ne_probe(struct net_device *dev) #ifndef MODULE struct net_device * __init ne_probe(int unit) { - struct net_device *dev = alloc_eip_netdev(); + struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) @@ -490,7 +490,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) /* Snarf the interrupt now. There's no point in waiting since we cannot share and the board will usually be enabled. 
*/ - ret = request_irq(dev->irq, eip_interrupt, 0, name, dev); + ret = request_irq(dev->irq, ei_interrupt, 0, name, dev); if (ret) { printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); goto err_out; @@ -534,7 +534,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) dev->open = &ne_open; dev->stop = &ne_close; #ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = eip_poll; + dev->poll_controller = ei_poll; #endif NS8390_init(dev, 0); @@ -554,7 +554,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) static int ne_open(struct net_device *dev) { - eip_open(dev); + ei_open(dev); return 0; } @@ -562,7 +562,7 @@ static int ne_close(struct net_device *dev) { if (ei_debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); - eip_close(dev); + ei_close(dev); return 0; } @@ -814,7 +814,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) if (!res || irq < 0) return -ENODEV; - dev = alloc_eip_netdev(); + dev = alloc_ei_netdev(); if (!dev) return -ENOMEM; dev->irq = irq; @@ -912,7 +912,7 @@ int __init init_module(void) int plat_found = !ne_init(); for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = alloc_eip_netdev(); + struct net_device *dev = alloc_ei_netdev(); if (!dev) break; dev->irq = irq[this_dev]; diff --git a/trunk/drivers/net/ne2.c b/trunk/drivers/net/ne2.c index 8f7256346922..f4cd8c7e81ba 100644 --- a/trunk/drivers/net/ne2.c +++ b/trunk/drivers/net/ne2.c @@ -280,7 +280,7 @@ static int __init do_ne2_probe(struct net_device *dev) #ifndef MODULE struct net_device * __init ne2_probe(int unit) { - struct net_device *dev = alloc_eip_netdev(); + struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) @@ -457,7 +457,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) /* Snarf the interrupt now. There's no point in waiting since we cannot share and the board will usually be enabled. 
*/ - retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev); + retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (retval) { printk (" unable to get IRQ %d (irqval=%d).\n", dev->irq, retval); @@ -497,9 +497,9 @@ static int __init ne2_probe1(struct net_device *dev, int slot) dev->open = &ne_open; dev->stop = &ne_close; #ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = eip_poll; + dev->poll_controller = ei_poll; #endif - NS8390p_init(dev, 0); + NS8390_init(dev, 0); retval = register_netdev(dev); if (retval) @@ -515,7 +515,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) static int ne_open(struct net_device *dev) { - eip_open(dev); + ei_open(dev); return 0; } @@ -523,7 +523,7 @@ static int ne_close(struct net_device *dev) { if (ei_debug > 1) printk("%s: Shutting down ethercard.\n", dev->name); - eip_close(dev); + ei_close(dev); return 0; } @@ -748,7 +748,7 @@ static void ne_block_output(struct net_device *dev, int count, if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ printk("%s: timeout waiting for Tx RDC.\n", dev->name); ne_reset_8390(dev); - NS8390p_init(dev, 1); + NS8390_init(dev,1); break; } @@ -781,7 +781,7 @@ int __init init_module(void) int this_dev, found = 0; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - dev = alloc_eip_netdev(); + dev = alloc_ei_netdev(); if (!dev) break; dev->irq = irq[this_dev]; diff --git a/trunk/drivers/net/niu.c b/trunk/drivers/net/niu.c index de2a8a30199d..918f802fe089 100644 --- a/trunk/drivers/net/niu.c +++ b/trunk/drivers/net/niu.c @@ -6385,162 +6385,6 @@ static int niu_get_eeprom(struct net_device *dev, return 0; } -static int niu_ethflow_to_class(int flow_type, u64 *class) -{ - switch (flow_type) { - case TCP_V4_FLOW: - *class = CLASS_CODE_TCP_IPV4; - break; - case UDP_V4_FLOW: - *class = CLASS_CODE_UDP_IPV4; - break; - case AH_ESP_V4_FLOW: - *class = CLASS_CODE_AH_ESP_IPV4; - break; - case SCTP_V4_FLOW: - *class = CLASS_CODE_SCTP_IPV4; - break; - case TCP_V6_FLOW: - *class = CLASS_CODE_TCP_IPV6; - break; - case UDP_V6_FLOW: - *class = CLASS_CODE_UDP_IPV6; - break; - case AH_ESP_V6_FLOW: - *class = CLASS_CODE_AH_ESP_IPV6; - break; - case SCTP_V6_FLOW: - *class = CLASS_CODE_SCTP_IPV6; - break; - default: - return -1; - } - - return 1; -} - -static u64 niu_flowkey_to_ethflow(u64 flow_key) -{ - u64 ethflow = 0; - - if (flow_key & FLOW_KEY_PORT) - ethflow |= RXH_DEV_PORT; - if (flow_key & FLOW_KEY_L2DA) - ethflow |= RXH_L2DA; - if (flow_key & FLOW_KEY_VLAN) - ethflow |= RXH_VLAN; - if (flow_key & FLOW_KEY_IPSA) - ethflow |= RXH_IP_SRC; - if (flow_key & FLOW_KEY_IPDA) - ethflow |= RXH_IP_DST; - if (flow_key & FLOW_KEY_PROTO) - ethflow |= RXH_L3_PROTO; - if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) - ethflow |= RXH_L4_B_0_1; - if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) - ethflow |= RXH_L4_B_2_3; - - return ethflow; - -} - -static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) -{ - u64 key = 0; - - if (ethflow & RXH_DEV_PORT) - key |= FLOW_KEY_PORT; - if (ethflow & RXH_L2DA) - key |= FLOW_KEY_L2DA; - if (ethflow & RXH_VLAN) - key |= FLOW_KEY_VLAN; - if (ethflow & RXH_IP_SRC) - key |= FLOW_KEY_IPSA; - if (ethflow & RXH_IP_DST) - key |= FLOW_KEY_IPDA; - if (ethflow & RXH_L3_PROTO) - key |= FLOW_KEY_PROTO; - if (ethflow & RXH_L4_B_0_1) - key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); - if (ethflow & RXH_L4_B_2_3) - key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); - - *flow_key = key; - - return 1; - -} - -static int 
niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - struct niu *np = netdev_priv(dev); - u64 class; - - cmd->data = 0; - - if (!niu_ethflow_to_class(cmd->flow_type, &class)) - return -EINVAL; - - if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & - TCAM_KEY_DISC) - cmd->data = RXH_DISCARD; - else - - cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - - CLASS_CODE_USER_PROG1]); - return 0; -} - -static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd) -{ - struct niu *np = netdev_priv(dev); - u64 class; - u64 flow_key = 0; - unsigned long flags; - - if (!niu_ethflow_to_class(cmd->flow_type, &class)) - return -EINVAL; - - if (class < CLASS_CODE_USER_PROG1 || - class > CLASS_CODE_SCTP_IPV6) - return -EINVAL; - - if (cmd->data & RXH_DISCARD) { - niu_lock_parent(np, flags); - flow_key = np->parent->tcam_key[class - - CLASS_CODE_USER_PROG1]; - flow_key |= TCAM_KEY_DISC; - nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); - np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; - niu_unlock_parent(np, flags); - return 0; - } else { - /* Discard was set before, but is not set now */ - if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & - TCAM_KEY_DISC) { - niu_lock_parent(np, flags); - flow_key = np->parent->tcam_key[class - - CLASS_CODE_USER_PROG1]; - flow_key &= ~TCAM_KEY_DISC; - nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), - flow_key); - np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = - flow_key; - niu_unlock_parent(np, flags); - } - } - - if (!niu_ethflow_to_flowkey(cmd->data, &flow_key)) - return -EINVAL; - - niu_lock_parent(np, flags); - nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); - np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; - niu_unlock_parent(np, flags); - - return 0; -} - static const struct { const char string[ETH_GSTRING_LEN]; } niu_xmac_stat_keys[] = { @@ -6771,8 +6615,6 @@ static const struct ethtool_ops niu_ethtool_ops = { .get_stats_count = niu_get_stats_count, .get_ethtool_stats = niu_get_ethtool_stats, .phys_id = niu_phys_id, - .get_rxhash = niu_get_hash_opts, - .set_rxhash = niu_set_hash_opts, }; static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, diff --git a/trunk/drivers/net/r8169.c b/trunk/drivers/net/r8169.c index cfe8829ed31f..657242504621 100644 --- a/trunk/drivers/net/r8169.c +++ b/trunk/drivers/net/r8169.c @@ -28,7 +28,13 @@ #include #include -#define RTL8169_VERSION "2.3LK-NAPI" +#ifdef CONFIG_R8169_NAPI +#define NAPI_SUFFIX "-NAPI" +#else +#define NAPI_SUFFIX "" +#endif + +#define RTL8169_VERSION "2.2LK" NAPI_SUFFIX #define MODULENAME "r8169" #define PFX MODULENAME ": " @@ -51,6 +57,16 @@ #define TX_BUFFS_AVAIL(tp) \ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) +#ifdef CONFIG_R8169_NAPI +#define rtl8169_rx_skb netif_receive_skb +#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb +#define rtl8169_rx_quota(count, quota) min(count, quota) +#else +#define rtl8169_rx_skb netif_rx +#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx +#define rtl8169_rx_quota(count, quota) count +#endif + /* Maximum events (Rx packets, etc.) to handle at each interrupt. 
*/ static const int max_interrupt_work = 20; @@ -378,7 +394,9 @@ struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; /* Index of PCI device */ struct net_device *dev; +#ifdef CONFIG_R8169_NAPI struct napi_struct napi; +#endif spinlock_t lock; /* spin lock flag */ u32 msg_enable; int chipset; @@ -440,7 +458,10 @@ static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); static void rtl8169_down(struct net_device *dev); static void rtl8169_rx_clear(struct rtl8169_private *tp); + +#ifdef CONFIG_R8169_NAPI static int rtl8169_poll(struct napi_struct *napi, int budget); +#endif static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); @@ -822,11 +843,10 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, struct sk_buff *skb) { u32 opts2 = le32_to_cpu(desc->opts2); - struct vlan_group *vlgrp = tp->vlgrp; int ret; - if (vlgrp && (opts2 & RxVlanTag)) { - vlan_hwaccel_receive_skb(skb, vlgrp, swab16(opts2 & 0xffff)); + if (tp->vlgrp && (opts2 & RxVlanTag)) { + rtl8169_rx_hwaccel_skb(skb, tp->vlgrp, swab16(opts2 & 0xffff)); ret = 0; } else ret = -1; @@ -1744,7 +1764,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->change_mtu = rtl8169_change_mtu; dev->set_mac_address = rtl_set_mac_address; +#ifdef CONFIG_R8169_NAPI netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); +#endif #ifdef CONFIG_R8169_VLAN dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; @@ -1865,7 +1887,9 @@ static int rtl8169_open(struct net_device *dev) if (retval < 0) goto err_release_ring_2; +#ifdef CONFIG_R8169_NAPI napi_enable(&tp->napi); +#endif rtl_hw_start(dev); @@ -2173,7 +2197,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) if (ret < 0) goto out; +#ifdef CONFIG_R8169_NAPI napi_enable(&tp->napi); +#endif rtl_hw_start(dev); @@ -2365,13 +2391,17 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) synchronize_irq(dev->irq); /* Wait for any pending NAPI task to complete */ +#ifdef CONFIG_R8169_NAPI napi_disable(&tp->napi); +#endif rtl8169_irq_mask_and_ack(ioaddr); +#ifdef CONFIG_R8169_NAPI tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); napi_enable(&tp->napi); +#endif } static void rtl8169_reinit_task(struct work_struct *work) @@ -2737,7 +2767,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; - rx_left = min(rx_left, budget); + rx_left = rtl8169_rx_quota(rx_left, budget); for (; rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; @@ -2799,7 +2829,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, skb->protocol = eth_type_trans(skb, dev); if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0) - netif_receive_skb(skb); + rtl8169_rx_skb(skb); dev->last_rx = jiffies; dev->stats.rx_bytes += pkt_size; @@ -2839,61 +2869,87 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct rtl8169_private *tp = netdev_priv(dev); + int boguscnt = max_interrupt_work; void __iomem *ioaddr = tp->mmio_addr; - int handled = 0; int status; + int handled = 0; - status = RTL_R16(IntrStatus); + do { + status = RTL_R16(IntrStatus); - /* hotplug/major error/no more work/shared irq */ - if ((status == 0xffff) || !status) - goto out; + /* hotplug/major error/no more 
work/shared irq */ + if ((status == 0xFFFF) || !status) + break; - handled = 1; + handled = 1; - if (unlikely(!netif_running(dev))) { - rtl8169_asic_down(ioaddr); - goto out; - } + if (unlikely(!netif_running(dev))) { + rtl8169_asic_down(ioaddr); + goto out; + } - status &= tp->intr_mask; - RTL_W16(IntrStatus, - (status & RxFIFOOver) ? (status | RxOverflow) : status); + status &= tp->intr_mask; + RTL_W16(IntrStatus, + (status & RxFIFOOver) ? (status | RxOverflow) : status); - if (!(status & tp->intr_event)) - goto out; + if (!(status & tp->intr_event)) + break; - /* Work around for rx fifo overflow */ - if (unlikely(status & RxFIFOOver) && - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(dev); - rtl8169_tx_timeout(dev); - goto out; - } + /* Work around for rx fifo overflow */ + if (unlikely(status & RxFIFOOver) && + (tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(dev); + rtl8169_tx_timeout(dev); + break; + } - if (unlikely(status & SYSErr)) { - rtl8169_pcierr_interrupt(dev); - goto out; - } + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(dev); + break; + } - if (status & LinkChg) - rtl8169_check_link_status(dev, tp, ioaddr); + if (status & LinkChg) + rtl8169_check_link_status(dev, tp, ioaddr); - if (status & tp->napi_event) { - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); - tp->intr_mask = ~tp->napi_event; +#ifdef CONFIG_R8169_NAPI + if (status & tp->napi_event) { + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); + tp->intr_mask = ~tp->napi_event; if (likely(netif_rx_schedule_prep(dev, &tp->napi))) __netif_rx_schedule(dev, &tp->napi); - else if (netif_msg_intr(tp)) { - printk(KERN_INFO "%s: interrupt %04x in poll\n", - dev->name, status); + else if (netif_msg_intr(tp)) { + printk(KERN_INFO "%s: interrupt %04x in poll\n", + dev->name, status); + } } + break; +#else + /* Rx interrupt */ + if (status & (RxOK | RxOverflow | RxFIFOOver)) + rtl8169_rx_interrupt(dev, tp, ioaddr, ~(u32)0); + + /* Tx interrupt */ + if (status & (TxOK | TxErr)) + rtl8169_tx_interrupt(dev, tp, ioaddr); +#endif + + boguscnt--; + } while (boguscnt > 0); + + if (boguscnt <= 0) { + if (netif_msg_intr(tp) && net_ratelimit() ) { + printk(KERN_WARNING + "%s: Too much work at interrupt!\n", dev->name); + } + /* Clear all interrupt sources. 
*/ + RTL_W16(IntrStatus, 0xffff); } out: return IRQ_RETVAL(handled); } +#ifdef CONFIG_R8169_NAPI static int rtl8169_poll(struct napi_struct *napi, int budget) { struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); @@ -2919,6 +2975,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) return work_done; } +#endif static void rtl8169_down(struct net_device *dev) { @@ -2930,7 +2987,9 @@ static void rtl8169_down(struct net_device *dev) netif_stop_queue(dev); +#ifdef CONFIG_R8169_NAPI napi_disable(&tp->napi); +#endif core_down: spin_lock_irq(&tp->lock); @@ -3039,10 +3098,8 @@ static void rtl_set_rx_mode(struct net_device *dev) (tp->mac_version == RTL_GIGA_MAC_VER_15) || (tp->mac_version == RTL_GIGA_MAC_VER_16) || (tp->mac_version == RTL_GIGA_MAC_VER_17)) { - u32 data = mc_filter[0]; - - mc_filter[0] = swab32(mc_filter[1]); - mc_filter[1] = swab32(data); + mc_filter[0] = 0xffffffff; + mc_filter[1] = 0xffffffff; } RTL_W32(MAR0 + 0, mc_filter[0]); diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c index aa4ee4439f04..7ab94c825b57 100644 --- a/trunk/drivers/net/tun.c +++ b/trunk/drivers/net/tun.c @@ -63,7 +63,6 @@ #include #include #include -#include #include #include @@ -284,7 +283,6 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, struct tun_pi pi = { 0, __constant_htons(ETH_P_IP) }; struct sk_buff *skb; size_t len = count, align = 0; - struct virtio_net_hdr gso = { 0 }; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) > count) @@ -294,17 +292,6 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, return -EFAULT; } - if (tun->flags & TUN_VNET_HDR) { - if ((len -= sizeof(gso)) > count) - return -EINVAL; - - if (memcpy_fromiovec((void *)&gso, iv, sizeof(gso))) - return -EFAULT; - - if (gso.hdr_len > len) - return -EINVAL; - } - if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { align = NET_IP_ALIGN; if (unlikely(len < ETH_HLEN)) @@ -324,16 +311,6 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, return -EFAULT; } - if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - if (!skb_partial_csum_set(skb, gso.csum_start, - gso.csum_offset)) { - tun->dev->stats.rx_frame_errors++; - kfree_skb(skb); - return -EINVAL; - } - } else if (tun->flags & TUN_NOCHECKSUM) - skb->ip_summed = CHECKSUM_UNNECESSARY; - switch (tun->flags & TUN_TYPE_MASK) { case TUN_TUN_DEV: if (tun->flags & TUN_NO_PI) { @@ -360,35 +337,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, break; }; - if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { - pr_debug("GSO!\n"); - switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { - case VIRTIO_NET_HDR_GSO_TCPV4: - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - break; - case VIRTIO_NET_HDR_GSO_TCPV6: - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; - break; - default: - tun->dev->stats.rx_frame_errors++; - kfree_skb(skb); - return -EINVAL; - } - - if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) - skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; - - skb_shinfo(skb)->gso_size = gso.gso_size; - if (skb_shinfo(skb)->gso_size == 0) { - tun->dev->stats.rx_frame_errors++; - kfree_skb(skb); - return -EINVAL; - } - - /* Header must be checked, and gso_segs computed. 
*/ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; - } + if (tun->flags & TUN_NOCHECKSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; netif_rx_ni(skb); tun->dev->last_rx = jiffies; @@ -434,39 +384,6 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, total += sizeof(pi); } - if (tun->flags & TUN_VNET_HDR) { - struct virtio_net_hdr gso = { 0 }; /* no info leak */ - if ((len -= sizeof(gso)) < 0) - return -EINVAL; - - if (skb_is_gso(skb)) { - struct skb_shared_info *sinfo = skb_shinfo(skb); - - /* This is a hint as to how much should be linear. */ - gso.hdr_len = skb_headlen(skb); - gso.gso_size = sinfo->gso_size; - if (sinfo->gso_type & SKB_GSO_TCPV4) - gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; - else if (sinfo->gso_type & SKB_GSO_TCPV6) - gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else - BUG(); - if (sinfo->gso_type & SKB_GSO_TCP_ECN) - gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; - } else - gso.gso_type = VIRTIO_NET_HDR_GSO_NONE; - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - gso.csum_start = skb->csum_start - skb_headroom(skb); - gso.csum_offset = skb->csum_offset; - } /* else everything is zero */ - - if (unlikely(memcpy_toiovec(iv, (void *)&gso, sizeof(gso)))) - return -EFAULT; - total += sizeof(gso); - } - len = min_t(int, skb->len, len); skb_copy_datagram_iovec(skb, 0, iv, len); @@ -681,11 +598,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) else tun->flags &= ~TUN_ONE_QUEUE; - if (ifr->ifr_flags & IFF_VNET_HDR) - tun->flags |= TUN_VNET_HDR; - else - tun->flags &= ~TUN_VNET_HDR; - file->private_data = tun; tun->attached = 1; get_net(dev_net(tun->dev)); @@ -699,46 +611,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) return err; } -/* This is like a cut-down ethtool ops, except done via tun fd so no - * privs required. */ -static int set_offload(struct net_device *dev, unsigned long arg) -{ - unsigned int old_features, features; - - old_features = dev->features; - /* Unset features, set them as we chew on the arg. */ - features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST - |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6)); - - if (arg & TUN_F_CSUM) { - features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; - arg &= ~TUN_F_CSUM; - - if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { - if (arg & TUN_F_TSO_ECN) { - features |= NETIF_F_TSO_ECN; - arg &= ~TUN_F_TSO_ECN; - } - if (arg & TUN_F_TSO4) - features |= NETIF_F_TSO; - if (arg & TUN_F_TSO6) - features |= NETIF_F_TSO6; - arg &= ~(TUN_F_TSO4|TUN_F_TSO6); - } - } - - /* This gives the user a way to test for new features in future by - * trying to set them. */ - if (arg) - return -EINVAL; - - dev->features = features; - if (old_features != dev->features) - netdev_features_change(dev); - - return 0; -} - static int tun_chr_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { @@ -768,15 +640,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, return 0; } - if (cmd == TUNGETFEATURES) { - /* Currently this just means: "what IFF flags are valid?". - * This is needed because we never checked for invalid flags on - * TUNSETIFF. 
*/ - return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | - IFF_VNET_HDR, - (unsigned int __user*)argp); - } - if (!tun) return -EBADFD; @@ -844,15 +707,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, break; #endif - case TUNSETOFFLOAD: - { - int ret; - rtnl_lock(); - ret = set_offload(tun->dev, arg); - rtnl_unlock(); - return ret; - } - case SIOCGIFFLAGS: ifr.ifr_flags = tun->if_flags; if (copy_to_user( argp, &ifr, sizeof ifr)) diff --git a/trunk/drivers/net/wan/c101.c b/trunk/drivers/net/wan/c101.c index c8e563106a4a..c2cc42f723d5 100644 --- a/trunk/drivers/net/wan/c101.c +++ b/trunk/drivers/net/wan/c101.c @@ -133,9 +133,9 @@ static void sca_msci_intr(port_t *port) sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); if (stat & ST1_UDRN) { - /* TX Underrun error detected */ - port_to_dev(port)->stats.tx_errors++; - port_to_dev(port)->stats.tx_fifo_errors++; + struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); + stats->tx_errors++; /* TX Underrun error detected */ + stats->tx_fifo_errors++; } stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ diff --git a/trunk/drivers/net/wan/dscc4.c b/trunk/drivers/net/wan/dscc4.c index 50ef5b4efd6d..c6f26e28e376 100644 --- a/trunk/drivers/net/wan/dscc4.c +++ b/trunk/drivers/net/wan/dscc4.c @@ -642,6 +642,7 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) { struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; + struct net_device_stats *stats = hdlc_stats(dev); struct pci_dev *pdev = dpriv->pci_priv->pdev; struct sk_buff *skb; int pkt_len; @@ -655,8 +656,8 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, pci_unmap_single(pdev, le32_to_cpu(rx_fd->data), RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE); if ((skb->data[--pkt_len] & FrameOk) == FrameOk) { - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; + stats->rx_packets++; + stats->rx_bytes += pkt_len; skb_put(skb, pkt_len); if (netif_running(dev)) skb->protocol = hdlc_type_trans(skb, dev); @@ -664,13 +665,13 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, netif_rx(skb); } else { if (skb->data[pkt_len] & FrameRdo) - dev->stats.rx_fifo_errors++; + stats->rx_fifo_errors++; else if (!(skb->data[pkt_len] | ~FrameCrc)) - dev->stats.rx_crc_errors++; + stats->rx_crc_errors++; else if (!(skb->data[pkt_len] | ~(FrameVfr | FrameRab))) - dev->stats.rx_length_errors++; + stats->rx_length_errors++; else - dev->stats.rx_errors++; + stats->rx_errors++; dev_kfree_skb_irq(skb); } refill: @@ -1568,6 +1569,7 @@ static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv, if (state & SccEvt) { if (state & Alls) { + struct net_device_stats *stats = hdlc_stats(dev); struct sk_buff *skb; struct TxFD *tx_fd; @@ -1584,8 +1586,8 @@ static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv, pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data), skb->len, PCI_DMA_TODEVICE); if (tx_fd->state & FrameEnd) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + stats->tx_packets++; + stats->tx_bytes += skb->len; } dev_kfree_skb_irq(skb); dpriv->tx_skbuff[cur] = NULL; @@ -1696,7 +1698,7 @@ static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv, } if (state & Err) { printk(KERN_INFO "%s: Tx ERR\n", dev->name); - dev->stats.tx_errors++; + hdlc_stats(dev)->tx_errors++; state &= ~Err; } } @@ -1832,7 +1834,7 @@ static void dscc4_rx_irq(struct dscc4_pci_priv *priv, if (!(rx_fd->state2 & DataComplete)) break; if (rx_fd->state2 & FrameAborted) { - dev->stats.rx_over_errors++; + 
hdlc_stats(dev)->rx_over_errors++; rx_fd->state1 |= Hold; rx_fd->state2 = 0x00000000; rx_fd->end = cpu_to_le32(0xbabeface); diff --git a/trunk/drivers/net/wan/farsync.c b/trunk/drivers/net/wan/farsync.c index 754f00809e3e..547368e9633d 100644 --- a/trunk/drivers/net/wan/farsync.c +++ b/trunk/drivers/net/wan/farsync.c @@ -845,6 +845,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, int len, int txpos) { struct net_device *dev = port_to_dev(port); + struct net_device_stats *stats = hdlc_stats(dev); /* * Everything is now set, just tell the card to go @@ -852,8 +853,8 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, dbg(DBG_TX, "fst_tx_dma_complete\n"); FST_WRB(card, txDescrRing[port->index][txpos].bits, DMA_OWN | TX_STP | TX_ENP); - dev->stats.tx_packets++; - dev->stats.tx_bytes += len; + stats->tx_packets++; + stats->tx_bytes += len; dev->trans_start = jiffies; } @@ -875,6 +876,7 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, int len, struct sk_buff *skb, int rxp) { struct net_device *dev = port_to_dev(port); + struct net_device_stats *stats = hdlc_stats(dev); int pi; int rx_status; @@ -886,8 +888,8 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); /* Update stats */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + stats->rx_packets++; + stats->rx_bytes += len; /* Push upstream */ dbg(DBG_RX, "Pushing the frame up the stack\n"); @@ -898,7 +900,7 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port, rx_status = netif_rx(skb); fst_process_rx_status(rx_status, port_to_dev(port)->name); if (rx_status == NET_RX_DROP) - dev->stats.rx_dropped++; + stats->rx_dropped++; dev->last_rx = jiffies; } @@ -1161,28 +1163,29 @@ fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port, unsigned char dmabits, int rxp, unsigned short len) { struct net_device *dev = port_to_dev(port); + struct net_device_stats *stats = hdlc_stats(dev); - /* + /* * Increment the appropriate error counter */ - dev->stats.rx_errors++; + stats->rx_errors++; if (dmabits & RX_OFLO) { - dev->stats.rx_fifo_errors++; + stats->rx_fifo_errors++; dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n", card->card_no, port->index, rxp); } if (dmabits & RX_CRC) { - dev->stats.rx_crc_errors++; + stats->rx_crc_errors++; dbg(DBG_ASS, "Rx crc error on card %d port %d\n", card->card_no, port->index); } if (dmabits & RX_FRAM) { - dev->stats.rx_frame_errors++; + stats->rx_frame_errors++; dbg(DBG_ASS, "Rx frame error on card %d port %d\n", card->card_no, port->index); } if (dmabits == (RX_STP | RX_ENP)) { - dev->stats.rx_length_errors++; + stats->rx_length_errors++; dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n", len, card->card_no, port->index); } @@ -1239,6 +1242,7 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port) unsigned short len; struct sk_buff *skb; struct net_device *dev = port_to_dev(port); + struct net_device_stats *stats = hdlc_stats(dev); /* Check we have a buffer to process */ pi = port->index; @@ -1287,7 +1291,7 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port) if ((skb = dev_alloc_skb(len)) == NULL) { dbg(DBG_RX, "intr_rx: can't allocate buffer\n"); - dev->stats.rx_dropped++; + stats->rx_dropped++; /* Return descriptor to card */ FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); @@ -1312,8 +1316,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info 
*port) FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN); /* Update stats */ - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; + stats->rx_packets++; + stats->rx_bytes += len; /* Push upstream */ dbg(DBG_RX, "Pushing frame up the stack\n"); @@ -1323,8 +1327,9 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port) skb->protocol = hdlc_type_trans(skb, dev); rx_status = netif_rx(skb); fst_process_rx_status(rx_status, port_to_dev(port)->name); - if (rx_status == NET_RX_DROP) - dev->stats.rx_dropped++; + if (rx_status == NET_RX_DROP) { + stats->rx_dropped++; + } dev->last_rx = jiffies; } else { card->dma_skb_rx = skb; @@ -1356,6 +1361,7 @@ do_bottom_half_tx(struct fst_card_info *card) struct sk_buff *skb; unsigned long flags; struct net_device *dev; + struct net_device_stats *stats; /* * Find a free buffer for the transmit @@ -1367,10 +1373,12 @@ do_bottom_half_tx(struct fst_card_info *card) if (!port->run) continue; - dev = port_to_dev(port); - while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) & - DMA_OWN) - && !(card->dmatx_in_progress)) { + dev = port_to_dev(port); + stats = hdlc_stats(dev); + while (! + (FST_RDB(card, txDescrRing[pi][port->txpos].bits) & + DMA_OWN) + && !(card->dmatx_in_progress)) { /* * There doesn't seem to be a txdone event per-se * We seem to have to deduce it, by checking the DMA_OWN @@ -1414,8 +1422,8 @@ do_bottom_half_tx(struct fst_card_info *card) txDescrRing[pi][port->txpos]. bits, DMA_OWN | TX_STP | TX_ENP); - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + stats->tx_packets++; + stats->tx_bytes += skb->len; dev->trans_start = jiffies; } else { /* Or do it through dma */ @@ -1620,8 +1628,8 @@ fst_intr(int dummy, void *dev_id) * always load up the entire packet for DMA. */ dbg(DBG_TX, "Tx underflow port %d\n", port->index); - port_to_dev(port)->stats.tx_errors++; - port_to_dev(port)->stats.tx_fifo_errors++; + hdlc_stats(port_to_dev(port))->tx_errors++; + hdlc_stats(port_to_dev(port))->tx_fifo_errors++; dbg(DBG_ASS, "Tx underflow on card %d port %d\n", card->card_no, port->index); break; @@ -2284,11 +2292,12 @@ fst_tx_timeout(struct net_device *dev) { struct fst_port_info *port; struct fst_card_info *card; + struct net_device_stats *stats = hdlc_stats(dev); port = dev_to_port(dev); card = port->card; - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; + stats->tx_errors++; + stats->tx_aborted_errors++; dbg(DBG_ASS, "Tx timeout card %d port %d\n", card->card_no, port->index); fst_issue_cmd(port, ABORTTX); @@ -2303,6 +2312,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fst_card_info *card; struct fst_port_info *port; + struct net_device_stats *stats = hdlc_stats(dev); unsigned long flags; int txq_length; @@ -2313,8 +2323,8 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Drop packet with error if we don't have carrier */ if (!netif_carrier_ok(dev)) { dev_kfree_skb(skb); - dev->stats.tx_errors++; - dev->stats.tx_carrier_errors++; + stats->tx_errors++; + stats->tx_carrier_errors++; dbg(DBG_ASS, "Tried to transmit but no carrier on card %d port %d\n", card->card_no, port->index); @@ -2326,7 +2336,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev) dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len, LEN_TX_BUFFER); dev_kfree_skb(skb); - dev->stats.tx_errors++; + stats->tx_errors++; return 0; } @@ -2358,7 +2368,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev) * This shouldn't have happened but such is life */ dev_kfree_skb(skb); - 
dev->stats.tx_errors++; + stats->tx_errors++; dbg(DBG_ASS, "Tx queue overflow card %d port %d\n", card->card_no, port->index); return 0; diff --git a/trunk/drivers/net/wan/hd6457x.c b/trunk/drivers/net/wan/hd6457x.c index 591fb45a7c68..8d0a1f2f00e5 100644 --- a/trunk/drivers/net/wan/hd6457x.c +++ b/trunk/drivers/net/wan/hd6457x.c @@ -271,9 +271,9 @@ static inline void sca_msci_intr(port_t *port) sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card); if (stat & ST1_UDRN) { - /* TX Underrun error detected */ - port_to_dev(port)->stats.tx_errors++; - port_to_dev(port)->stats.tx_fifo_errors++; + struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); + stats->tx_errors++; /* TX Underrun error detected */ + stats->tx_fifo_errors++; } if (stat & ST1_CDCD) @@ -286,6 +286,7 @@ static inline void sca_msci_intr(port_t *port) static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin) { struct net_device *dev = port_to_dev(port); + struct net_device_stats *stats = hdlc_stats(dev); struct sk_buff *skb; u16 len; u32 buff; @@ -297,7 +298,7 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1 len = readw(&desc->len); skb = dev_alloc_skb(len); if (!skb) { - dev->stats.rx_dropped++; + stats->rx_dropped++; return; } @@ -326,8 +327,8 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1 printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len); debug_frame(skb); #endif - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + stats->rx_packets++; + stats->rx_bytes += skb->len; dev->last_rx = jiffies; skb->protocol = hdlc_type_trans(skb, dev); netif_rx(skb); @@ -338,18 +339,17 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1 /* Receive DMA interrupt service */ static inline void sca_rx_intr(port_t *port) { - struct net_device *dev = port_to_dev(port); u16 dmac = get_dmac_rx(port); card_t *card = port_to_card(port); u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */ + struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); /* Reset DSR status bits */ sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE, DSR_RX(phy_node(port)), card); if (stat & DSR_BOF) - /* Dropped one or more frames */ - dev->stats.rx_over_errors++; + stats->rx_over_errors++; /* Dropped one or more frames */ while (1) { u32 desc_off = desc_offset(port, port->rxin, 0); @@ -364,14 +364,12 @@ static inline void sca_rx_intr(port_t *port) if (!(stat & ST_RX_EOM)) port->rxpart = 1; /* partial frame received */ else if ((stat & ST_ERROR_MASK) || port->rxpart) { - dev->stats.rx_errors++; - if (stat & ST_RX_OVERRUN) - dev->stats.rx_fifo_errors++; + stats->rx_errors++; + if (stat & ST_RX_OVERRUN) stats->rx_fifo_errors++; else if ((stat & (ST_RX_SHORT | ST_RX_ABORT | ST_RX_RESBIT)) || port->rxpart) - dev->stats.rx_frame_errors++; - else if (stat & ST_RX_CRC) - dev->stats.rx_crc_errors++; + stats->rx_frame_errors++; + else if (stat & ST_RX_CRC) stats->rx_crc_errors++; if (stat & ST_RX_EOM) port->rxpart = 0; /* received last fragment */ } else @@ -392,6 +390,7 @@ static inline void sca_rx_intr(port_t *port) static inline void sca_tx_intr(port_t *port) { struct net_device *dev = port_to_dev(port); + struct net_device_stats *stats = hdlc_stats(dev); u16 dmac = get_dmac_tx(port); card_t* card = port_to_card(port); u8 stat; @@ -413,8 +412,8 @@ static inline void sca_tx_intr(port_t *port) break; /* Transmitter is/will_be sending this frame */ desc = desc_address(port, port->txlast, 1); 
- dev->stats.tx_packets++; - dev->stats.tx_bytes += readw(&desc->len); + stats->tx_packets++; + stats->tx_bytes += readw(&desc->len); writeb(0, &desc->stat); /* Free descriptor */ port->txlast = next_desc(port, port->txlast, 1); } diff --git a/trunk/drivers/net/wan/hdlc.c b/trunk/drivers/net/wan/hdlc.c index e3a536477c7e..7f984895b0d5 100644 --- a/trunk/drivers/net/wan/hdlc.c +++ b/trunk/drivers/net/wan/hdlc.c @@ -57,7 +57,7 @@ static int hdlc_change_mtu(struct net_device *dev, int new_mtu) static struct net_device_stats *hdlc_get_stats(struct net_device *dev) { - return &dev->stats; + return hdlc_stats(dev); } diff --git a/trunk/drivers/net/wan/hdlc_cisco.c b/trunk/drivers/net/wan/hdlc_cisco.c index 849819c2552d..762d21c1c703 100644 --- a/trunk/drivers/net/wan/hdlc_cisco.c +++ b/trunk/drivers/net/wan/hdlc_cisco.c @@ -252,8 +252,8 @@ static int cisco_rx(struct sk_buff *skb) dev_kfree_skb_any(skb); return NET_RX_DROP; -rx_error: - dev->stats.rx_errors++; /* Mark error */ + rx_error: + dev_to_hdlc(dev)->stats.rx_errors++; /* Mark error */ dev_kfree_skb_any(skb); return NET_RX_DROP; } diff --git a/trunk/drivers/net/wan/hdlc_fr.c b/trunk/drivers/net/wan/hdlc_fr.c index 109bab34094d..520bb0b1a9a2 100644 --- a/trunk/drivers/net/wan/hdlc_fr.c +++ b/trunk/drivers/net/wan/hdlc_fr.c @@ -135,6 +135,11 @@ typedef struct pvc_device_struct { }state; }pvc_device; +struct pvc_desc { + struct net_device_stats stats; + pvc_device *pvc; +}; + struct frad_state { fr_proto settings; pvc_device *first_pvc; @@ -174,6 +179,15 @@ static inline struct frad_state* state(hdlc_device *hdlc) return(struct frad_state *)(hdlc->state); } +static inline struct pvc_desc* pvcdev_to_desc(struct net_device *dev) +{ + return dev->priv; +} + +static inline struct net_device_stats* pvc_get_stats(struct net_device *dev) +{ + return &pvcdev_to_desc(dev)->stats; +} static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) { @@ -343,7 +357,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) static int pvc_open(struct net_device *dev) { - pvc_device *pvc = dev->priv; + pvc_device *pvc = pvcdev_to_desc(dev)->pvc; if ((pvc->frad->flags & IFF_UP) == 0) return -EIO; /* Frad must be UP in order to activate PVC */ @@ -363,7 +377,7 @@ static int pvc_open(struct net_device *dev) static int pvc_close(struct net_device *dev) { - pvc_device *pvc = dev->priv; + pvc_device *pvc = pvcdev_to_desc(dev)->pvc; if (--pvc->open_count == 0) { hdlc_device *hdlc = dev_to_hdlc(pvc->frad); @@ -382,7 +396,7 @@ static int pvc_close(struct net_device *dev) static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - pvc_device *pvc = dev->priv; + pvc_device *pvc = pvcdev_to_desc(dev)->pvc; fr_proto_pvc_info info; if (ifr->ifr_settings.type == IF_GET_PROTO) { @@ -410,7 +424,8 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) { - pvc_device *pvc = dev->priv; + pvc_device *pvc = pvcdev_to_desc(dev)->pvc; + struct net_device_stats *stats = pvc_get_stats(dev); if (pvc->state.active) { if (dev->type == ARPHRD_ETHER) { @@ -420,7 +435,7 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) if (skb_tailroom(skb) < pad) if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { - dev->stats.tx_dropped++; + stats->tx_dropped++; dev_kfree_skb(skb); return 0; } @@ -430,17 +445,17 @@ static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) skb->protocol = __constant_htons(ETH_P_802_3); } if (!fr_hard_header(&skb, pvc->dlci)) { - 
dev->stats.tx_bytes += skb->len; - dev->stats.tx_packets++; + stats->tx_bytes += skb->len; + stats->tx_packets++; if (pvc->state.fecn) /* TX Congestion counter */ - dev->stats.tx_compressed++; + stats->tx_compressed++; skb->dev = pvc->frad; dev_queue_xmit(skb); return 0; } } - dev->stats.tx_dropped++; + stats->tx_dropped++; dev_kfree_skb(skb); return 0; } @@ -940,7 +955,7 @@ static int fr_rx(struct sk_buff *skb) if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { - frad->stats.rx_dropped++; + dev_to_hdlc(frad)->stats.rx_dropped++; return NET_RX_DROP; } @@ -988,10 +1003,11 @@ static int fr_rx(struct sk_buff *skb) } if (dev) { - dev->stats.rx_packets++; /* PVC traffic */ - dev->stats.rx_bytes += skb->len; + struct net_device_stats *stats = pvc_get_stats(dev); + stats->rx_packets++; /* PVC traffic */ + stats->rx_bytes += skb->len; if (pvc->state.becn) - dev->stats.rx_compressed++; + stats->rx_compressed++; netif_rx(skb); return NET_RX_SUCCESS; } else { @@ -1000,7 +1016,7 @@ static int fr_rx(struct sk_buff *skb) } rx_error: - frad->stats.rx_errors++; /* Mark error */ + dev_to_hdlc(frad)->stats.rx_errors++; /* Mark error */ dev_kfree_skb_any(skb); return NET_RX_DROP; } @@ -1071,7 +1087,7 @@ static void pvc_setup(struct net_device *dev) static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) { hdlc_device *hdlc = dev_to_hdlc(frad); - pvc_device *pvc; + pvc_device *pvc = NULL; struct net_device *dev; int result, used; @@ -1087,9 +1103,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) used = pvc_is_used(pvc); if (type == ARPHRD_ETHER) - dev = alloc_netdev(0, "pvceth%d", ether_setup); + dev = alloc_netdev(sizeof(struct pvc_desc), "pvceth%d", + ether_setup); else - dev = alloc_netdev(0, "pvc%d", pvc_setup); + dev = alloc_netdev(sizeof(struct pvc_desc), "pvc%d", pvc_setup); if (!dev) { printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", @@ -1105,13 +1122,14 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) dlci_to_q922(dev->broadcast, dlci); } dev->hard_start_xmit = pvc_xmit; + dev->get_stats = pvc_get_stats; dev->open = pvc_open; dev->stop = pvc_close; dev->do_ioctl = pvc_ioctl; dev->change_mtu = pvc_change_mtu; dev->mtu = HDLC_MAX_MTU; dev->tx_queue_len = 0; - dev->priv = pvc; + pvcdev_to_desc(dev)->pvc = pvc; result = dev_alloc_name(dev, dev->name); if (result < 0) { diff --git a/trunk/drivers/net/wan/hdlc_raw_eth.c b/trunk/drivers/net/wan/hdlc_raw_eth.c index 26dee600506f..d20c685f6711 100644 --- a/trunk/drivers/net/wan/hdlc_raw_eth.c +++ b/trunk/drivers/net/wan/hdlc_raw_eth.c @@ -33,7 +33,7 @@ static int eth_tx(struct sk_buff *skb, struct net_device *dev) int len = skb->len; if (skb_tailroom(skb) < pad) if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { - dev->stats.tx_dropped++; + hdlc_stats(dev)->tx_dropped++; dev_kfree_skb(skb); return 0; } diff --git a/trunk/drivers/net/wan/hdlc_x25.c b/trunk/drivers/net/wan/hdlc_x25.c index e808720030ef..c15cc11e399b 100644 --- a/trunk/drivers/net/wan/hdlc_x25.c +++ b/trunk/drivers/net/wan/hdlc_x25.c @@ -164,15 +164,17 @@ static void x25_close(struct net_device *dev) static int x25_rx(struct sk_buff *skb) { + struct hdlc_device *hdlc = dev_to_hdlc(skb->dev); + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { - skb->dev->stats.rx_dropped++; + hdlc->stats.rx_dropped++; return NET_RX_DROP; } if (lapb_data_received(skb->dev, skb) == LAPB_OK) return NET_RX_SUCCESS; - skb->dev->stats.rx_errors++; + hdlc->stats.rx_errors++; dev_kfree_skb_any(skb); return NET_RX_DROP; } 
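/*
 * The WAN hunks in this part of the series repeatedly swap the generic
 * dev->stats counters for an hdlc_stats(dev)-style accessor (or, for the
 * frame-relay PVC devices, a pvc_get_stats() helper), so the counters end
 * up living in protocol-private state reached through the net_device rather
 * than in the net_device itself.  The sketch below is a minimal userspace
 * model of that accessor idiom, not kernel code: the fake_* names and the
 * two counters are simplified stand-ins chosen for the example only.
 */
#include <stdio.h>

struct net_device_stats {
	unsigned long rx_packets;
	unsigned long rx_errors;
};

/* Stand-in for the protocol-private state that owns the counters. */
struct fake_hdlc_device {
	struct net_device_stats stats;
};

/* Stand-in for net_device: it only carries a pointer to the private state. */
struct fake_net_device {
	void *priv;
};

/* Accessor in the spirit of hdlc_stats(): callers never hard-code where
 * the counters are stored. */
static struct net_device_stats *fake_hdlc_stats(struct fake_net_device *dev)
{
	return &((struct fake_hdlc_device *)dev->priv)->stats;
}

int main(void)
{
	struct fake_hdlc_device hdlc = { { 0, 0 } };
	struct fake_net_device dev = { &hdlc };
	struct net_device_stats *stats = fake_hdlc_stats(&dev);

	/* Drivers bump counters only through the accessor's return value... */
	stats->rx_packets++;
	stats->rx_errors++;

	/* ...and readers see them in the private structure that owns them. */
	printf("rx_packets=%lu rx_errors=%lu\n",
	       hdlc.stats.rx_packets, hdlc.stats.rx_errors);
	return 0;
}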
diff --git a/trunk/drivers/net/wan/pc300_drv.c b/trunk/drivers/net/wan/pc300_drv.c index 334170527755..57914fbd41d3 100644 --- a/trunk/drivers/net/wan/pc300_drv.c +++ b/trunk/drivers/net/wan/pc300_drv.c @@ -285,6 +285,7 @@ static void rx_dma_buf_init(pc300_t *, int); static void tx_dma_buf_check(pc300_t *, int); static void rx_dma_buf_check(pc300_t *, int); static irqreturn_t cpc_intr(int, void *); +static struct net_device_stats *cpc_get_stats(struct net_device *); static int clock_rate_calc(uclong, uclong, int *); static uclong detect_ram(pc300_t *); static void plx_init(pc300_t *); @@ -1774,12 +1775,13 @@ static void cpc_tx_timeout(struct net_device *dev) pc300dev_t *d = (pc300dev_t *) dev->priv; pc300ch_t *chan = (pc300ch_t *) d->chan; pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = hdlc_stats(dev); int ch = chan->channel; unsigned long flags; ucchar ilar; - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; + stats->tx_errors++; + stats->tx_aborted_errors++; CPC_LOCK(card, flags); if ((ilar = cpc_readb(card->hw.scabase + ILAR)) != 0) { printk("%s: ILAR=0x%x\n", dev->name, ilar); @@ -1801,6 +1803,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) pc300dev_t *d = (pc300dev_t *) dev->priv; pc300ch_t *chan = (pc300ch_t *) d->chan; pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = hdlc_stats(dev); int ch = chan->channel; unsigned long flags; #ifdef PC300_DEBUG_TX @@ -1814,13 +1817,13 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) } else if (!netif_carrier_ok(dev)) { /* DCD must be OFF: drop packet */ dev_kfree_skb(skb); - dev->stats.tx_errors++; - dev->stats.tx_carrier_errors++; + stats->tx_errors++; + stats->tx_carrier_errors++; return 0; } else if (cpc_readb(card->hw.scabase + M_REG(ST3, ch)) & ST3_DCD) { printk("%s: DCD is OFF. Going administrative down.\n", dev->name); - dev->stats.tx_errors++; - dev->stats.tx_carrier_errors++; + stats->tx_errors++; + stats->tx_carrier_errors++; dev_kfree_skb(skb); netif_carrier_off(dev); CPC_LOCK(card, flags); @@ -1840,8 +1843,8 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) // printk("%s: write error. 
Dropping TX packet.\n", dev->name); netif_stop_queue(dev); dev_kfree_skb(skb); - dev->stats.tx_errors++; - dev->stats.tx_dropped++; + stats->tx_errors++; + stats->tx_dropped++; return 0; } #ifdef PC300_DEBUG_TX @@ -1883,6 +1886,7 @@ static void cpc_net_rx(struct net_device *dev) pc300dev_t *d = (pc300dev_t *) dev->priv; pc300ch_t *chan = (pc300ch_t *) d->chan; pc300_t *card = (pc300_t *) chan->card; + struct net_device_stats *stats = hdlc_stats(dev); int ch = chan->channel; #ifdef PC300_DEBUG_RX int i; @@ -1918,24 +1922,24 @@ static void cpc_net_rx(struct net_device *dev) #endif if ((skb == NULL) && (rxb > 0)) { /* rxb > dev->mtu */ - dev->stats.rx_errors++; - dev->stats.rx_length_errors++; + stats->rx_errors++; + stats->rx_length_errors++; continue; } if (rxb < 0) { /* Invalid frame */ rxb = -rxb; if (rxb & DST_OVR) { - dev->stats.rx_errors++; - dev->stats.rx_fifo_errors++; + stats->rx_errors++; + stats->rx_fifo_errors++; } if (rxb & DST_CRC) { - dev->stats.rx_errors++; - dev->stats.rx_crc_errors++; + stats->rx_errors++; + stats->rx_crc_errors++; } if (rxb & (DST_RBIT | DST_SHRT | DST_ABT)) { - dev->stats.rx_errors++; - dev->stats.rx_frame_errors++; + stats->rx_errors++; + stats->rx_frame_errors++; } } if (skb) { @@ -1944,7 +1948,7 @@ static void cpc_net_rx(struct net_device *dev) continue; } - dev->stats.rx_bytes += rxb; + stats->rx_bytes += rxb; #ifdef PC300_DEBUG_RX printk("%s R:", dev->name); @@ -1955,7 +1959,7 @@ static void cpc_net_rx(struct net_device *dev) if (d->trace_on) { cpc_trace(dev, skb, 'R'); } - dev->stats.rx_packets++; + stats->rx_packets++; skb->protocol = hdlc_type_trans(skb, dev); netif_rx(skb); } @@ -1970,15 +1974,16 @@ static void sca_tx_intr(pc300dev_t *dev) pc300_t *card = (pc300_t *)chan->card; int ch = chan->channel; volatile pcsca_bd_t __iomem * ptdescr; + struct net_device_stats *stats = hdlc_stats(dev->dev); /* Clean up descriptors from previous transmission */ ptdescr = (card->hw.rambase + TX_BD_ADDR(ch,chan->tx_first_bd)); - while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) != - TX_BD_ADDR(ch,chan->tx_first_bd)) && - (cpc_readb(&ptdescr->status) & DST_OSB)) { - dev->dev->stats.tx_packets++; - dev->dev->stats.tx_bytes += cpc_readw(&ptdescr->len); + while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) != + TX_BD_ADDR(ch,chan->tx_first_bd)) && + (cpc_readb(&ptdescr->status) & DST_OSB)) { + stats->tx_packets++; + stats->tx_bytes += cpc_readw(&ptdescr->len); cpc_writeb(&ptdescr->status, DST_OSB); cpc_writew(&ptdescr->len, 0); chan->nfree_tx_bd++; @@ -2043,8 +2048,8 @@ static void sca_intr(pc300_t * card) } cpc_net_rx(dev); /* Discard invalid frames */ - dev->stats.rx_errors++; - dev->stats.rx_over_errors++; + hdlc_stats(dev)->rx_errors++; + hdlc_stats(dev)->rx_over_errors++; chan->rx_first_bd = 0; chan->rx_last_bd = N_DMA_RX_BUF - 1; rx_dma_start(card, ch); @@ -2110,8 +2115,8 @@ static void sca_intr(pc300_t * card) card->hw.cpld_reg2) & ~ (CPLD_REG2_FALC_LED1 << (2 * ch))); } - dev->stats.tx_errors++; - dev->stats.tx_fifo_errors++; + hdlc_stats(dev)->tx_errors++; + hdlc_stats(dev)->tx_fifo_errors++; sca_tx_intr(d); } } @@ -2599,7 +2604,7 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGPC300UTILSTATS: { if (!arg) { /* clear statistics */ - memset(&dev->stats, 0, sizeof(dev->stats)); + memset(hdlc_stats(dev), 0, sizeof(struct net_device_stats)); if (card->hw.type == PC300_TE) { memset(&chan->falc, 0, sizeof(falc_t)); } @@ -2610,8 +2615,8 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 
pc300stats.hw_type = card->hw.type; pc300stats.line_on = card->chan[ch].d.line_on; pc300stats.line_off = card->chan[ch].d.line_off; - memcpy(&pc300stats.gen_stats, &dev->stats, - sizeof(dev->stats)); + memcpy(&pc300stats.gen_stats, hdlc_stats(dev), + sizeof(struct net_device_stats)); if (card->hw.type == PC300_TE) memcpy(&pc300stats.te_stats,&chan->falc,sizeof(falc_t)); if (copy_to_user(arg, &pc300stats, sizeof(pc300stats_t))) @@ -2818,6 +2823,11 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } +static struct net_device_stats *cpc_get_stats(struct net_device *dev) +{ + return hdlc_stats(dev); +} + static int clock_rate_calc(uclong rate, uclong clock, int *br_io) { int br, tc; @@ -3384,6 +3394,7 @@ static void cpc_init_card(pc300_t * card) dev->stop = cpc_close; dev->tx_timeout = cpc_tx_timeout; dev->watchdog_timeo = PC300_TX_TIMEOUT; + dev->get_stats = cpc_get_stats; dev->set_multicast_list = NULL; dev->set_mac_address = NULL; dev->change_mtu = cpc_change_mtu; diff --git a/trunk/drivers/net/wan/pc300_tty.c b/trunk/drivers/net/wan/pc300_tty.c index eae94ab6b818..e03eef2f2282 100644 --- a/trunk/drivers/net/wan/pc300_tty.c +++ b/trunk/drivers/net/wan/pc300_tty.c @@ -458,7 +458,7 @@ static int cpc_tty_write(struct tty_struct *tty, const unsigned char *buf, int c CPC_TTY_DBG("%s: cpc_tty_write data len=%i\n",cpc_tty->name,count); pc300chan = (pc300ch_t *)((pc300dev_t*)cpc_tty->pc300dev)->chan; - stats = &cpc_tty->pc300dev->dev->stats; + stats = hdlc_stats(((pc300dev_t*)cpc_tty->pc300dev)->dev); card = (pc300_t *) pc300chan->card; ch = pc300chan->channel; @@ -743,7 +743,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev) pc300_t *card = (pc300_t *)pc300chan->card; int ch = pc300chan->channel; volatile pcsca_bd_t __iomem * ptdescr; - struct net_device_stats *stats = &pc300dev->dev->stats; + struct net_device_stats *stats = hdlc_stats(pc300dev->dev); int rx_len, rx_aux; volatile unsigned char status; unsigned short first_bd = pc300chan->rx_first_bd; @@ -917,7 +917,7 @@ static int cpc_tty_send_to_card(pc300dev_t *dev,void* buf, int len) pc300ch_t *chan = (pc300ch_t *)dev->chan; pc300_t *card = (pc300_t *)chan->card; int ch = chan->channel; - struct net_device_stats *stats = &dev->dev->stats; + struct net_device_stats *stats = hdlc_stats(dev->dev); unsigned long flags; volatile pcsca_bd_t __iomem *ptdescr; int i, nchar; diff --git a/trunk/drivers/net/wan/wanxl.c b/trunk/drivers/net/wan/wanxl.c index a8a5ca0ee6c2..d4aab8a28b61 100644 --- a/trunk/drivers/net/wan/wanxl.c +++ b/trunk/drivers/net/wan/wanxl.c @@ -161,6 +161,7 @@ static inline void wanxl_cable_intr(port_t *port) static inline void wanxl_tx_intr(port_t *port) { struct net_device *dev = port->dev; + struct net_device_stats *stats = hdlc_stats(dev); while (1) { desc_t *desc = &get_status(port)->tx_descs[port->tx_in]; struct sk_buff *skb = port->tx_skbs[port->tx_in]; @@ -172,13 +173,13 @@ static inline void wanxl_tx_intr(port_t *port) return; case PACKET_UNDERRUN: - dev->stats.tx_errors++; - dev->stats.tx_fifo_errors++; + stats->tx_errors++; + stats->tx_fifo_errors++; break; default: - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + stats->tx_packets++; + stats->tx_bytes += skb->len; } desc->stat = PACKET_EMPTY; /* Free descriptor */ pci_unmap_single(port->card->pdev, desc->address, skb->len, @@ -204,9 +205,10 @@ static inline void wanxl_rx_intr(card_t *card) port_t *port = &card->ports[desc->stat & PACKET_PORT_MASK]; struct net_device *dev = port->dev; + struct net_device_stats *stats = 
hdlc_stats(dev); if (!skb) - dev->stats.rx_dropped++; + stats->rx_dropped++; else { pci_unmap_single(card->pdev, desc->address, BUFFER_LENGTH, @@ -218,8 +220,8 @@ static inline void wanxl_rx_intr(card_t *card) skb->len); debug_frame(skb); #endif - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + stats->rx_packets++; + stats->rx_bytes += skb->len; dev->last_rx = jiffies; skb->protocol = hdlc_type_trans(skb, dev); netif_rx(skb); @@ -466,13 +468,13 @@ static int wanxl_close(struct net_device *dev) static struct net_device_stats *wanxl_get_stats(struct net_device *dev) { + struct net_device_stats *stats = hdlc_stats(dev); port_t *port = dev_to_port(dev); - dev->stats.rx_over_errors = get_status(port)->rx_overruns; - dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors; - dev->stats.rx_errors = dev->stats.rx_over_errors + - dev->stats.rx_frame_errors; - return &dev->stats; + stats->rx_over_errors = get_status(port)->rx_overruns; + stats->rx_frame_errors = get_status(port)->rx_frame_errors; + stats->rx_errors = stats->rx_over_errors + stats->rx_frame_errors; + return stats; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/trunk/drivers/net/wireless/iwlwifi/iwl-rfkill.c index ffefbb487e12..32b1c4b4c6a2 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-rfkill.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-rfkill.c @@ -50,7 +50,7 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return 0; - IWL_DEBUG_RF_KILL("we recieved soft RFKILL set to state %d\n", state); + IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state); mutex_lock(&priv->mutex); switch (state) { diff --git a/trunk/include/linux/ethtool.h b/trunk/include/linux/ethtool.h index 8bb5e87df365..c8d216357865 100644 --- a/trunk/include/linux/ethtool.h +++ b/trunk/include/linux/ethtool.h @@ -272,12 +272,6 @@ enum ethtool_flags { ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ }; -struct ethtool_rxnfc { - __u32 cmd; - __u32 flow_type; - __u64 data; -}; - #ifdef __KERNEL__ struct net_device; @@ -402,8 +396,6 @@ struct ethtool_ops { /* the following hooks are obsolete */ int (*self_test_count)(struct net_device *);/* use get_sset_count */ int (*get_stats_count)(struct net_device *);/* use get_sset_count */ - int (*get_rxhash)(struct net_device *, struct ethtool_rxnfc *); - int (*set_rxhash)(struct net_device *, struct ethtool_rxnfc *); }; #endif /* __KERNEL__ */ @@ -450,9 +442,6 @@ struct ethtool_ops { #define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ #define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ -#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ -#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ - /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET #define SPARC_ETH_SSET ETHTOOL_SSET @@ -539,26 +528,4 @@ struct ethtool_ops { #define WAKE_MAGIC (1 << 5) #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ -/* L3-L4 network traffic flow types */ -#define TCP_V4_FLOW 0x01 -#define UDP_V4_FLOW 0x02 -#define SCTP_V4_FLOW 0x03 -#define AH_ESP_V4_FLOW 0x04 -#define TCP_V6_FLOW 0x05 -#define UDP_V6_FLOW 0x06 -#define SCTP_V6_FLOW 0x07 -#define AH_ESP_V6_FLOW 0x08 - -/* L3-L4 network traffic flow hash options */ -#define RXH_DEV_PORT (1 << 0) -#define RXH_L2DA (1 << 1) -#define RXH_VLAN (1 << 2) -#define RXH_L3_PROTO (1 << 3) -#define RXH_IP_SRC (1 << 4) -#define RXH_IP_DST (1 << 5) -#define 
RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ -#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ -#define RXH_DISCARD (1 << 31) - - #endif /* _LINUX_ETHTOOL_H */ diff --git a/trunk/include/linux/hdlc.h b/trunk/include/linux/hdlc.h index c59769693bee..6115545a5b9c 100644 --- a/trunk/include/linux/hdlc.h +++ b/trunk/include/linux/hdlc.h @@ -45,6 +45,7 @@ struct hdlc_proto { /* Pointed to by dev->priv */ typedef struct hdlc_device { + struct net_device_stats stats; /* used by HDLC layer to take control over HDLC device from hw driver*/ int (*attach)(struct net_device *dev, unsigned short encoding, unsigned short parity); @@ -108,6 +109,12 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, /* May be used by hardware driver to gain control over HDLC device */ void detach_hdlc_protocol(struct net_device *dev); +static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev) +{ + return &dev_to_hdlc(dev)->stats; +} + + static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, struct net_device *dev) { diff --git a/trunk/include/linux/if_tun.h b/trunk/include/linux/if_tun.h index 563fae542da6..18f31b6187a3 100644 --- a/trunk/include/linux/if_tun.h +++ b/trunk/include/linux/if_tun.h @@ -31,7 +31,6 @@ #define TUN_NO_PI 0x0040 #define TUN_ONE_QUEUE 0x0080 #define TUN_PERSIST 0x0100 -#define TUN_VNET_HDR 0x0200 /* Ioctl defines */ #define TUNSETNOCSUM _IOW('T', 200, int) @@ -41,21 +40,12 @@ #define TUNSETOWNER _IOW('T', 204, int) #define TUNSETLINK _IOW('T', 205, int) #define TUNSETGROUP _IOW('T', 206, int) -#define TUNGETFEATURES _IOR('T', 207, unsigned int) -#define TUNSETOFFLOAD _IOW('T', 208, unsigned int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 #define IFF_TAP 0x0002 #define IFF_NO_PI 0x1000 #define IFF_ONE_QUEUE 0x2000 -#define IFF_VNET_HDR 0x4000 - -/* Features for GSO (TUNSETOFFLOAD). */ -#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ -#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ -#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ -#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. 
*/ struct tun_pi { unsigned short flags; diff --git a/trunk/include/linux/if_vlan.h b/trunk/include/linux/if_vlan.h index 5190452ac7dc..15ace02b7b24 100644 --- a/trunk/include/linux/if_vlan.h +++ b/trunk/include/linux/if_vlan.h @@ -402,7 +402,6 @@ enum vlan_ioctl_cmds { enum vlan_flags { VLAN_FLAG_REORDER_HDR = 0x1, - VLAN_FLAG_GVRP = 0x2, }; enum vlan_name_types { diff --git a/trunk/include/linux/igmp.h b/trunk/include/linux/igmp.h index 7bb3c095c15b..f5a1a0db2e8e 100644 --- a/trunk/include/linux/igmp.h +++ b/trunk/include/linux/igmp.h @@ -228,6 +228,7 @@ extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, struct group_filter __user *optval, int __user *optlen); extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif); +extern void ip_mr_init(void); extern void ip_mc_init_dev(struct in_device *); extern void ip_mc_destroy_dev(struct in_device *); extern void ip_mc_up(struct in_device *); diff --git a/trunk/include/linux/ipv6.h b/trunk/include/linux/ipv6.h index 391ad0843a46..cde056e08181 100644 --- a/trunk/include/linux/ipv6.h +++ b/trunk/include/linux/ipv6.h @@ -163,8 +163,6 @@ struct ipv6_devconf { #ifdef CONFIG_IPV6_MROUTE __s32 mc_forwarding; #endif - __s32 disable_ipv6; - __s32 accept_dad; void *sysctl; }; @@ -196,8 +194,6 @@ enum { DEVCONF_OPTIMISTIC_DAD, DEVCONF_ACCEPT_SOURCE_ROUTE, DEVCONF_MC_FORWARDING, - DEVCONF_DISABLE_IPV6, - DEVCONF_ACCEPT_DAD, DEVCONF_MAX }; diff --git a/trunk/include/linux/mroute.h b/trunk/include/linux/mroute.h index 07112ee9293a..de4decfa1bfc 100644 --- a/trunk/include/linux/mroute.h +++ b/trunk/include/linux/mroute.h @@ -144,37 +144,11 @@ static inline int ip_mroute_opt(int opt) } #endif -#ifdef CONFIG_IP_MROUTE extern int ip_mroute_setsockopt(struct sock *, int, char __user *, int); extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); -extern int ip_mr_init(void); -#else -static inline -int ip_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, int optlen) -{ - return -ENOPROTOOPT; -} - -static inline -int ip_mroute_getsockopt(struct sock *sock, - int optname, char __user *optval, int __user *optlen) -{ - return -ENOPROTOOPT; -} +extern void ip_mr_init(void); -static inline -int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) -{ - return -ENOIOCTLCMD; -} - -static inline int ip_mr_init(void) -{ - return 0; -} -#endif struct vif_device { diff --git a/trunk/include/linux/mroute6.h b/trunk/include/linux/mroute6.h index 5cf50473a10f..e7989593142b 100644 --- a/trunk/include/linux/mroute6.h +++ b/trunk/include/linux/mroute6.h @@ -131,44 +131,11 @@ static inline int ip6_mroute_opt(int opt) struct sock; -#ifdef CONFIG_IPV6_MROUTE extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int); extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); -extern int ip6_mr_init(void); -extern void ip6_mr_cleanup(void); -#else -static inline -int ip6_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, int optlen) -{ - return -ENOPROTOOPT; -} - -static inline -int ip6_mroute_getsockopt(struct sock *sock, - int optname, char __user *optval, int __user *optlen) -{ - return -ENOPROTOOPT; -} - -static inline -int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) -{ - return 
-ENOIOCTLCMD; -} - -static inline int ip6_mr_init(void) -{ - return 0; -} - -static inline void ip6_mr_cleanup(void) -{ - return; -} -#endif +extern void ip6_mr_init(void); struct mif_device { diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h index e009c6fbf5cd..56dadb528f67 100644 --- a/trunk/include/linux/netdevice.h +++ b/trunk/include/linux/netdevice.h @@ -740,8 +740,6 @@ struct net_device struct net_bridge_port *br_port; /* macvlan */ struct macvlan_port *macvlan_port; - /* GARP */ - struct garp_port *garp_port; /* class/net/name entry */ struct device dev; diff --git a/trunk/include/net/fib_rules.h b/trunk/include/net/fib_rules.h index c2bb5cae6515..a5c6ccc5bb19 100644 --- a/trunk/include/net/fib_rules.h +++ b/trunk/include/net/fib_rules.h @@ -62,7 +62,7 @@ struct fib_rules_ops /* Called after modifications to the rules set, must flush * the route cache if one exists. */ - void (*flush_cache)(struct fib_rules_ops *ops); + void (*flush_cache)(void); int nlgroup; const struct nla_policy *policy; diff --git a/trunk/include/net/garp.h b/trunk/include/net/garp.h deleted file mode 100644 index 825f172caba9..000000000000 --- a/trunk/include/net/garp.h +++ /dev/null @@ -1,128 +0,0 @@ -#ifndef _NET_GARP_H -#define _NET_GARP_H - -#include - -#define GARP_PROTOCOL_ID 0x1 -#define GARP_END_MARK 0x0 - -struct garp_pdu_hdr { - __be16 protocol; -}; - -struct garp_msg_hdr { - u8 attrtype; -}; - -enum garp_attr_event { - GARP_LEAVE_ALL, - GARP_JOIN_EMPTY, - GARP_JOIN_IN, - GARP_LEAVE_EMPTY, - GARP_LEAVE_IN, - GARP_EMPTY, -}; - -struct garp_attr_hdr { - u8 len; - u8 event; - u8 data[]; -}; - -struct garp_skb_cb { - u8 cur_type; -}; - -static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb) -{ - BUILD_BUG_ON(sizeof(struct garp_skb_cb) > - FIELD_SIZEOF(struct sk_buff, cb)); - return (struct garp_skb_cb *)skb->cb; -} - -enum garp_applicant_state { - GARP_APPLICANT_INVALID, - GARP_APPLICANT_VA, - GARP_APPLICANT_AA, - GARP_APPLICANT_QA, - GARP_APPLICANT_LA, - GARP_APPLICANT_VP, - GARP_APPLICANT_AP, - GARP_APPLICANT_QP, - GARP_APPLICANT_VO, - GARP_APPLICANT_AO, - GARP_APPLICANT_QO, - __GARP_APPLICANT_MAX -}; -#define GARP_APPLICANT_MAX (__GARP_APPLICANT_MAX - 1) - -enum garp_event { - GARP_EVENT_REQ_JOIN, - GARP_EVENT_REQ_LEAVE, - GARP_EVENT_R_JOIN_IN, - GARP_EVENT_R_JOIN_EMPTY, - GARP_EVENT_R_EMPTY, - GARP_EVENT_R_LEAVE_IN, - GARP_EVENT_R_LEAVE_EMPTY, - GARP_EVENT_TRANSMIT_PDU, - __GARP_EVENT_MAX -}; -#define GARP_EVENT_MAX (__GARP_EVENT_MAX - 1) - -enum garp_action { - GARP_ACTION_NONE, - GARP_ACTION_S_JOIN_IN, - GARP_ACTION_S_LEAVE_EMPTY, -}; - -struct garp_attr { - struct rb_node node; - enum garp_applicant_state state; - u8 type; - u8 dlen; - unsigned char data[]; -}; - -enum garp_applications { - GARP_APPLICATION_GVRP, - __GARP_APPLICATION_MAX -}; -#define GARP_APPLICATION_MAX (__GARP_APPLICATION_MAX - 1) - -struct garp_application { - enum garp_applications type; - unsigned int maxattr; - struct stp_proto proto; -}; - -struct garp_applicant { - struct garp_application *app; - struct net_device *dev; - struct timer_list join_timer; - - spinlock_t lock; - struct sk_buff_head queue; - struct sk_buff *pdu; - struct rb_root gid; -}; - -struct garp_port { - struct garp_applicant *applicants[GARP_APPLICATION_MAX + 1]; -}; - -extern int garp_register_application(struct garp_application *app); -extern void garp_unregister_application(struct garp_application *app); - -extern int garp_init_applicant(struct net_device *dev, - struct garp_application *app); -extern void 
garp_uninit_applicant(struct net_device *dev, - struct garp_application *app); - -extern int garp_request_join(const struct net_device *dev, - const struct garp_application *app, - const void *data, u8 len, u8 type); -extern void garp_request_leave(const struct net_device *dev, - const struct garp_application *app, - const void *data, u8 len, u8 type); - -#endif /* _NET_GARP_H */ diff --git a/trunk/include/net/netns/ipv4.h b/trunk/include/net/netns/ipv4.h index a6ed83853dcc..6ef90b5fafb3 100644 --- a/trunk/include/net/netns/ipv4.h +++ b/trunk/include/net/netns/ipv4.h @@ -18,7 +18,6 @@ struct netns_ipv4 { struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; struct ctl_table_header *ipv4_hdr; - struct ctl_table_header *route_hdr; #endif struct ipv4_devconf *devconf_all; struct ipv4_devconf *devconf_dflt; @@ -46,8 +45,5 @@ struct netns_ipv4 { int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; - - struct timer_list rt_secret_timer; - atomic_t rt_genid; }; #endif diff --git a/trunk/include/net/route.h b/trunk/include/net/route.h index 3140cc500854..fc836ff824cc 100644 --- a/trunk/include/net/route.h +++ b/trunk/include/net/route.h @@ -111,7 +111,7 @@ struct in_device; extern int ip_rt_init(void); extern void ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw, __be32 src, struct net_device *dev); -extern void rt_cache_flush(struct net *net, int how); +extern void rt_cache_flush(int how); extern int __ip_route_output_key(struct net *, struct rtable **, const struct flowi *flp); extern int ip_route_output_key(struct net *, struct rtable **, struct flowi *flp); extern int ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags); diff --git a/trunk/include/net/stp.h b/trunk/include/net/stp.h deleted file mode 100644 index ad447f105417..000000000000 --- a/trunk/include/net/stp.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _NET_STP_H -#define _NET_STP_H - -struct stp_proto { - unsigned char group_address[ETH_ALEN]; - void (*rcv)(const struct stp_proto *, struct sk_buff *, - struct net_device *); - void *data; -}; - -extern int stp_proto_register(const struct stp_proto *proto); -extern void stp_proto_unregister(const struct stp_proto *proto); - -#endif /* _NET_STP_H */ diff --git a/trunk/include/net/udp.h b/trunk/include/net/udp.h index 3e551592aa76..7a8684855245 100644 --- a/trunk/include/net/udp.h +++ b/trunk/include/net/udp.h @@ -158,17 +158,17 @@ DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); /* * SNMP statistics for UDP and UDP-Lite */ -#define UDP_INC_STATS_USER(net, field, is_udplite) do { (void)net; \ +#define UDP_INC_STATS_USER(field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_USER(udplite_statistics, field); \ else SNMP_INC_STATS_USER(udp_statistics, field); } while(0) -#define UDP_INC_STATS_BH(net, field, is_udplite) do { (void)net; \ +#define UDP_INC_STATS_BH(field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_BH(udplite_statistics, field); \ else SNMP_INC_STATS_BH(udp_statistics, field); } while(0) -#define UDP6_INC_STATS_BH(net, field, is_udplite) do { (void)net; \ +#define UDP6_INC_STATS_BH(field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_BH(udplite_stats_in6, field); \ else SNMP_INC_STATS_BH(udp_stats_in6, field); } while(0) -#define UDP6_INC_STATS_USER(net, field, is_udplite) do { (void)net; \ +#define UDP6_INC_STATS_USER(field, is_udplite) do { \ if (is_udplite) SNMP_INC_STATS_USER(udplite_stats_in6, field); \ else SNMP_INC_STATS_USER(udp_stats_in6, 
field); } while(0) @@ -176,12 +176,12 @@ DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); #define UDPX_INC_STATS_BH(sk, field) \ do { \ if ((sk)->sk_family == AF_INET) \ - UDP_INC_STATS_BH(sock_net(sk), field, 0); \ + UDP_INC_STATS_BH(field, 0); \ else \ - UDP6_INC_STATS_BH(sock_net(sk), field, 0); \ + UDP6_INC_STATS_BH(field, 0); \ } while (0); #else -#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0) +#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(field, 0) #endif /* /proc */ diff --git a/trunk/net/802/Kconfig b/trunk/net/802/Kconfig deleted file mode 100644 index be33d27c8e69..000000000000 --- a/trunk/net/802/Kconfig +++ /dev/null @@ -1,7 +0,0 @@ -config STP - tristate - select LLC - -config GARP - tristate - select STP diff --git a/trunk/net/802/Makefile b/trunk/net/802/Makefile index 7893d679910c..68569ffddea1 100644 --- a/trunk/net/802/Makefile +++ b/trunk/net/802/Makefile @@ -10,5 +10,3 @@ obj-$(CONFIG_FDDI) += fddi.o obj-$(CONFIG_HIPPI) += hippi.o obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o obj-$(CONFIG_ATALK) += p8022.o psnap.o -obj-$(CONFIG_STP) += stp.o -obj-$(CONFIG_GARP) += garp.o diff --git a/trunk/net/802/garp.c b/trunk/net/802/garp.c deleted file mode 100644 index 3b78f7b74fd4..000000000000 --- a/trunk/net/802/garp.c +++ /dev/null @@ -1,633 +0,0 @@ -/* - * IEEE 802.1D Generic Attribute Registration Protocol (GARP) - * - * Copyright (c) 2008 Patrick McHardy - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static unsigned int garp_join_time __read_mostly = 200; -module_param(garp_join_time, uint, 0644); -MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)"); -MODULE_LICENSE("GPL"); - -static const struct garp_state_trans { - u8 state; - u8 action; -} garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = { - [GARP_APPLICANT_VA] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA, - .action = GARP_ACTION_S_JOIN_IN }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AA }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, - }, - [GARP_APPLICANT_AA] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA, - .action = GARP_ACTION_S_JOIN_IN }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, - }, - [GARP_APPLICANT_QA] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_LEAVE_EMPTY] = { 
.state = GARP_APPLICANT_VP }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, - }, - [GARP_APPLICANT_LA] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_VO, - .action = GARP_ACTION_S_LEAVE_EMPTY }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_LA }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_LA }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_LA }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VA }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, - }, - [GARP_APPLICANT_VP] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA, - .action = GARP_ACTION_S_JOIN_IN }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AP }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_VO }, - }, - [GARP_APPLICANT_AP] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA, - .action = GARP_ACTION_S_JOIN_IN }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_AO }, - }, - [GARP_APPLICANT_QP] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_QO }, - }, - [GARP_APPLICANT_VO] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AO }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VP }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, - }, - [GARP_APPLICANT_AO] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO }, - [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_AP }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, - }, - [GARP_APPLICANT_QO] = { - [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, - [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO }, - 
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, - [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_QP }, - [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, - }, -}; - -static int garp_attr_cmp(const struct garp_attr *attr, - const void *data, u8 len, u8 type) -{ - if (attr->type != type) - return attr->type - type; - if (attr->dlen != len) - return attr->dlen - len; - return memcmp(attr->data, data, len); -} - -static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app, - const void *data, u8 len, u8 type) -{ - struct rb_node *parent = app->gid.rb_node; - struct garp_attr *attr; - int d; - - while (parent) { - attr = rb_entry(parent, struct garp_attr, node); - d = garp_attr_cmp(attr, data, len, type); - if (d < 0) - parent = parent->rb_left; - else if (d > 0) - parent = parent->rb_right; - else - return attr; - } - return NULL; -} - -static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new) -{ - struct rb_node *parent = NULL, **p = &app->gid.rb_node; - struct garp_attr *attr; - int d; - - while (*p) { - parent = *p; - attr = rb_entry(parent, struct garp_attr, node); - d = garp_attr_cmp(attr, new->data, new->dlen, new->type); - if (d < 0) - p = &parent->rb_left; - else if (d > 0) - p = &parent->rb_right; - } - rb_link_node(&new->node, parent, p); - rb_insert_color(&new->node, &app->gid); -} - -static struct garp_attr *garp_attr_create(struct garp_applicant *app, - const void *data, u8 len, u8 type) -{ - struct garp_attr *attr; - - attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC); - if (!attr) - return attr; - attr->state = GARP_APPLICANT_VO; - attr->type = type; - attr->dlen = len; - memcpy(attr->data, data, len); - garp_attr_insert(app, attr); - return attr; -} - -static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr) -{ - rb_erase(&attr->node, &app->gid); - kfree(attr); -} - -static int garp_pdu_init(struct garp_applicant *app) -{ - struct sk_buff *skb; - struct garp_pdu_hdr *gp; - -#define LLC_RESERVE sizeof(struct llc_pdu_un) - skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev), - GFP_ATOMIC); - if (!skb) - return -ENOMEM; - - skb->dev = app->dev; - skb->protocol = htons(ETH_P_802_2); - skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE); - - gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp)); - put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol); - - app->pdu = skb; - return 0; -} - -static int garp_pdu_append_end_mark(struct garp_applicant *app) -{ - if (skb_tailroom(app->pdu) < sizeof(u8)) - return -1; - *(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK; - return 0; -} - -static void garp_pdu_queue(struct garp_applicant *app) -{ - if (!app->pdu) - return; - - garp_pdu_append_end_mark(app); - garp_pdu_append_end_mark(app); - - llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN, - LLC_SAP_BSPAN, LLC_PDU_CMD); - llc_pdu_init_as_ui_cmd(app->pdu); - llc_mac_hdr_init(app->pdu, app->dev->dev_addr, - app->app->proto.group_address); - - skb_queue_tail(&app->queue, app->pdu); - app->pdu = NULL; -} - -static void garp_queue_xmit(struct garp_applicant *app) -{ - struct sk_buff *skb; - - while ((skb = skb_dequeue(&app->queue))) - dev_queue_xmit(skb); -} - -static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype) -{ - struct garp_msg_hdr *gm; - - if (skb_tailroom(app->pdu) < 
sizeof(*gm)) - return -1; - gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm)); - gm->attrtype = attrtype; - garp_cb(app->pdu)->cur_type = attrtype; - return 0; -} - -static int garp_pdu_append_attr(struct garp_applicant *app, - const struct garp_attr *attr, - enum garp_attr_event event) -{ - struct garp_attr_hdr *ga; - unsigned int len; - int err; -again: - if (!app->pdu) { - err = garp_pdu_init(app); - if (err < 0) - return err; - } - - if (garp_cb(app->pdu)->cur_type != attr->type) { - if (garp_cb(app->pdu)->cur_type && - garp_pdu_append_end_mark(app) < 0) - goto queue; - if (garp_pdu_append_msg(app, attr->type) < 0) - goto queue; - } - - len = sizeof(*ga) + attr->dlen; - if (skb_tailroom(app->pdu) < len) - goto queue; - ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len); - ga->len = len; - ga->event = event; - memcpy(ga->data, attr->data, attr->dlen); - return 0; - -queue: - garp_pdu_queue(app); - goto again; -} - -static void garp_attr_event(struct garp_applicant *app, - struct garp_attr *attr, enum garp_event event) -{ - enum garp_applicant_state state; - - state = garp_applicant_state_table[attr->state][event].state; - if (state == GARP_APPLICANT_INVALID) - return; - - switch (garp_applicant_state_table[attr->state][event].action) { - case GARP_ACTION_NONE: - break; - case GARP_ACTION_S_JOIN_IN: - garp_pdu_append_attr(app, attr, GARP_JOIN_IN); - break; - case GARP_ACTION_S_LEAVE_EMPTY: - garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY); - /* As a pure applicant, sending a leave message implies that - * the attribute was unregistered and can be destroyed. */ - garp_attr_destroy(app, attr); - return; - default: - WARN_ON(1); - } - - attr->state = state; -} - -int garp_request_join(const struct net_device *dev, - const struct garp_application *appl, - const void *data, u8 len, u8 type) -{ - struct garp_port *port = dev->garp_port; - struct garp_applicant *app = port->applicants[appl->type]; - struct garp_attr *attr; - - spin_lock_bh(&app->lock); - attr = garp_attr_create(app, data, len, type); - if (!attr) { - spin_unlock_bh(&app->lock); - return -ENOMEM; - } - garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN); - spin_unlock_bh(&app->lock); - return 0; -} -EXPORT_SYMBOL_GPL(garp_request_join); - -void garp_request_leave(const struct net_device *dev, - const struct garp_application *appl, - const void *data, u8 len, u8 type) -{ - struct garp_port *port = dev->garp_port; - struct garp_applicant *app = port->applicants[appl->type]; - struct garp_attr *attr; - - spin_lock_bh(&app->lock); - attr = garp_attr_lookup(app, data, len, type); - if (!attr) { - spin_unlock_bh(&app->lock); - return; - } - garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE); - spin_unlock_bh(&app->lock); -} -EXPORT_SYMBOL_GPL(garp_request_leave); - -static void garp_gid_event(struct garp_applicant *app, enum garp_event event) -{ - struct rb_node *node, *next; - struct garp_attr *attr; - - for (node = rb_first(&app->gid); - next = node ? 
rb_next(node) : NULL, node != NULL; - node = next) { - attr = rb_entry(node, struct garp_attr, node); - garp_attr_event(app, attr, event); - } -} - -static void garp_join_timer_arm(struct garp_applicant *app) -{ - unsigned long delay; - - delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32; - mod_timer(&app->join_timer, jiffies + delay); -} - -static void garp_join_timer(unsigned long data) -{ - struct garp_applicant *app = (struct garp_applicant *)data; - - spin_lock(&app->lock); - garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); - garp_pdu_queue(app); - spin_unlock(&app->lock); - - garp_queue_xmit(app); - garp_join_timer_arm(app); -} - -static int garp_pdu_parse_end_mark(struct sk_buff *skb) -{ - if (!pskb_may_pull(skb, sizeof(u8))) - return -1; - if (*skb->data == GARP_END_MARK) { - skb_pull(skb, sizeof(u8)); - return -1; - } - return 0; -} - -static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb, - u8 attrtype) -{ - const struct garp_attr_hdr *ga; - struct garp_attr *attr; - enum garp_event event; - unsigned int dlen; - - if (!pskb_may_pull(skb, sizeof(*ga))) - return -1; - ga = (struct garp_attr_hdr *)skb->data; - if (ga->len < sizeof(*ga)) - return -1; - - if (!pskb_may_pull(skb, ga->len)) - return -1; - skb_pull(skb, ga->len); - dlen = sizeof(*ga) - ga->len; - - if (attrtype > app->app->maxattr) - return 0; - - switch (ga->event) { - case GARP_LEAVE_ALL: - if (dlen != 0) - return -1; - garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY); - return 0; - case GARP_JOIN_EMPTY: - event = GARP_EVENT_R_JOIN_EMPTY; - break; - case GARP_JOIN_IN: - event = GARP_EVENT_R_JOIN_IN; - break; - case GARP_LEAVE_EMPTY: - event = GARP_EVENT_R_LEAVE_EMPTY; - break; - case GARP_EMPTY: - event = GARP_EVENT_R_EMPTY; - break; - default: - return 0; - } - - if (dlen == 0) - return -1; - attr = garp_attr_lookup(app, ga->data, dlen, attrtype); - if (attr == NULL) - return 0; - garp_attr_event(app, attr, event); - return 0; -} - -static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb) -{ - const struct garp_msg_hdr *gm; - - if (!pskb_may_pull(skb, sizeof(*gm))) - return -1; - gm = (struct garp_msg_hdr *)skb->data; - if (gm->attrtype == 0) - return -1; - skb_pull(skb, sizeof(*gm)); - - while (skb->len > 0) { - if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0) - return -1; - if (garp_pdu_parse_end_mark(skb) < 0) - break; - } - return 0; -} - -static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb, - struct net_device *dev) -{ - struct garp_application *appl = proto->data; - struct garp_port *port; - struct garp_applicant *app; - const struct garp_pdu_hdr *gp; - - port = rcu_dereference(dev->garp_port); - if (!port) - goto err; - app = rcu_dereference(port->applicants[appl->type]); - if (!app) - goto err; - - if (!pskb_may_pull(skb, sizeof(*gp))) - goto err; - gp = (struct garp_pdu_hdr *)skb->data; - if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID)) - goto err; - skb_pull(skb, sizeof(*gp)); - - spin_lock(&app->lock); - while (skb->len > 0) { - if (garp_pdu_parse_msg(app, skb) < 0) - break; - if (garp_pdu_parse_end_mark(skb) < 0) - break; - } - spin_unlock(&app->lock); -err: - kfree_skb(skb); -} - -static int garp_init_port(struct net_device *dev) -{ - struct garp_port *port; - - port = kzalloc(sizeof(*port), GFP_KERNEL); - if (!port) - return -ENOMEM; - rcu_assign_pointer(dev->garp_port, port); - return 0; -} - -static void garp_release_port(struct net_device *dev) -{ - struct garp_port *port = dev->garp_port; - unsigned int 
i; - - for (i = 0; i <= GARP_APPLICATION_MAX; i++) { - if (port->applicants[i]) - return; - } - rcu_assign_pointer(dev->garp_port, NULL); - synchronize_rcu(); - kfree(port); -} - -int garp_init_applicant(struct net_device *dev, struct garp_application *appl) -{ - struct garp_applicant *app; - int err; - - ASSERT_RTNL(); - - if (!dev->garp_port) { - err = garp_init_port(dev); - if (err < 0) - goto err1; - } - - err = -ENOMEM; - app = kzalloc(sizeof(*app), GFP_KERNEL); - if (!app) - goto err2; - - err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0); - if (err < 0) - goto err3; - - app->dev = dev; - app->app = appl; - app->gid = RB_ROOT; - spin_lock_init(&app->lock); - skb_queue_head_init(&app->queue); - rcu_assign_pointer(dev->garp_port->applicants[appl->type], app); - setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app); - garp_join_timer_arm(app); - return 0; - -err3: - kfree(app); -err2: - garp_release_port(dev); -err1: - return err; -} -EXPORT_SYMBOL_GPL(garp_init_applicant); - -void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl) -{ - struct garp_port *port = dev->garp_port; - struct garp_applicant *app = port->applicants[appl->type]; - - ASSERT_RTNL(); - - rcu_assign_pointer(port->applicants[appl->type], NULL); - synchronize_rcu(); - - /* Delete timer and generate a final TRANSMIT_PDU event to flush out - * all pending messages before the applicant is gone. */ - del_timer_sync(&app->join_timer); - garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); - garp_pdu_queue(app); - garp_queue_xmit(app); - - dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0); - kfree(app); - garp_release_port(dev); -} -EXPORT_SYMBOL_GPL(garp_uninit_applicant); - -int garp_register_application(struct garp_application *appl) -{ - appl->proto.rcv = garp_pdu_rcv; - appl->proto.data = appl; - return stp_proto_register(&appl->proto); -} -EXPORT_SYMBOL_GPL(garp_register_application); - -void garp_unregister_application(struct garp_application *appl) -{ - stp_proto_unregister(&appl->proto); -} -EXPORT_SYMBOL_GPL(garp_unregister_application); diff --git a/trunk/net/802/stp.c b/trunk/net/802/stp.c deleted file mode 100644 index 0b7a24452d11..000000000000 --- a/trunk/net/802/stp.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * STP SAP demux - * - * Copyright (c) 2008 Patrick McHardy - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- */ -#include -#include -#include -#include -#include -#include -#include - -/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */ -#define GARP_ADDR_MIN 0x20 -#define GARP_ADDR_MAX 0x2F -#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN) - -static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly; -static const struct stp_proto *stp_proto __read_mostly; - -static struct llc_sap *sap __read_mostly; -static unsigned int sap_registered; -static DEFINE_MUTEX(stp_proto_mutex); - -/* Called under rcu_read_lock from LLC */ -static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) -{ - const struct ethhdr *eh = eth_hdr(skb); - const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); - const struct stp_proto *proto; - - if (pdu->ssap != LLC_SAP_BSPAN || - pdu->dsap != LLC_SAP_BSPAN || - pdu->ctrl_1 != LLC_PDU_TYPE_U) - goto err; - - if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) { - proto = rcu_dereference(garp_protos[eh->h_dest[5] - - GARP_ADDR_MIN]); - if (proto && - compare_ether_addr(eh->h_dest, proto->group_address)) - goto err; - } else - proto = rcu_dereference(stp_proto); - - if (!proto) - goto err; - - proto->rcv(proto, skb, dev); - return 0; - -err: - kfree_skb(skb); - return 0; -} - -int stp_proto_register(const struct stp_proto *proto) -{ - int err = 0; - - mutex_lock(&stp_proto_mutex); - if (sap_registered++ == 0) { - sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv); - if (!sap) { - err = -ENOMEM; - goto out; - } - } - if (is_zero_ether_addr(proto->group_address)) - rcu_assign_pointer(stp_proto, proto); - else - rcu_assign_pointer(garp_protos[proto->group_address[5] - - GARP_ADDR_MIN], proto); -out: - mutex_unlock(&stp_proto_mutex); - return err; -} -EXPORT_SYMBOL_GPL(stp_proto_register); - -void stp_proto_unregister(const struct stp_proto *proto) -{ - mutex_lock(&stp_proto_mutex); - if (is_zero_ether_addr(proto->group_address)) - rcu_assign_pointer(stp_proto, NULL); - else - rcu_assign_pointer(garp_protos[proto->group_address[5] - - GARP_ADDR_MIN], NULL); - synchronize_rcu(); - - if (--sap_registered == 0) - llc_sap_put(sap); - mutex_unlock(&stp_proto_mutex); -} -EXPORT_SYMBOL_GPL(stp_proto_unregister); - -MODULE_LICENSE("GPL"); diff --git a/trunk/net/8021q/Kconfig b/trunk/net/8021q/Kconfig index fa073a54963e..c4a382e450e2 100644 --- a/trunk/net/8021q/Kconfig +++ b/trunk/net/8021q/Kconfig @@ -17,13 +17,3 @@ config VLAN_8021Q will be called 8021q. If unsure, say N. - -config VLAN_8021Q_GVRP - bool "GVRP (GARP VLAN Registration Protocol) support" - depends on VLAN_8021Q - select GARP - help - Select this to enable GVRP end-system support. GVRP is used for - automatic propagation of registered VLANs to switches. - - If unsure, say N. 
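With net/802/stp.c and net/802/garp.c removed by the hunks above, the stp_proto_register() demultiplexer no longer exists, so a consumer of 802.1D BPDUs opens the bridge SAP directly through the LLC core again, as the net/bridge/br.c hunk further down does. A rough sketch of that direct registration, assuming the llc_sap_open()/llc_sap_put() interface shown in that hunk; bar_stp_sap, bar_stp_rcv() and bar_init() are placeholders, not code from this patch:

/* Illustrative only: claiming the bridge SAP directly once the
 * intermediate stp_proto layer is gone. bar_stp_rcv() stands in for
 * the real BPDU handler. */
#include <linux/llc.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/llc.h>

static struct llc_sap *bar_stp_sap;

static int bar_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* a real handler would validate the LLC header and parse the BPDU */
	kfree_skb(skb);
	return 0;
}

static int __init bar_init(void)
{
	bar_stp_sap = llc_sap_open(LLC_SAP_BSPAN, bar_stp_rcv);
	if (!bar_stp_sap)
		return -EADDRINUSE;	/* SAP already claimed */
	return 0;
}

static void __exit bar_exit(void)
{
	llc_sap_put(bar_stp_sap);
}

The trade-off, visible in the br_stp_rcv() hunk below, is that each receiver must now check the LLC DSAP/SSAP/control fields itself rather than relying on the removed SAP demux.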
diff --git a/trunk/net/8021q/Makefile b/trunk/net/8021q/Makefile index 3006e9ed7b08..10ca7f486c3a 100644 --- a/trunk/net/8021q/Makefile +++ b/trunk/net/8021q/Makefile @@ -4,6 +4,9 @@ obj-$(CONFIG_VLAN_8021Q) += 8021q.o -8021q-y := vlan.o vlan_dev.o vlan_netlink.o -8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o -8021q-$(CONFIG_PROC_FS) += vlanproc.o +8021q-objs := vlan.o vlan_dev.o vlan_netlink.o + +ifeq ($(CONFIG_PROC_FS),y) +8021q-objs += vlanproc.o +endif + diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c index b529110c9355..ab2225da0ee2 100644 --- a/trunk/net/8021q/vlan.c +++ b/trunk/net/8021q/vlan.c @@ -165,12 +165,8 @@ void unregister_vlan_dev(struct net_device *dev) synchronize_net(); - unregister_netdevice(dev); - /* If the group is now empty, kill off the group. */ if (grp->nr_vlans == 0) { - vlan_gvrp_uninit_applicant(real_dev); - if (real_dev->features & NETIF_F_HW_VLAN_RX) real_dev->vlan_rx_register(real_dev, NULL); @@ -182,6 +178,8 @@ void unregister_vlan_dev(struct net_device *dev) /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); + + unregister_netdevice(dev); } static void vlan_transfer_operstate(const struct net_device *dev, @@ -251,18 +249,15 @@ int register_vlan_dev(struct net_device *dev) ngrp = grp = vlan_group_alloc(real_dev); if (!grp) return -ENOBUFS; - err = vlan_gvrp_init_applicant(real_dev); - if (err < 0) - goto out_free_group; } err = vlan_group_prealloc_vid(grp, vlan_id); if (err < 0) - goto out_uninit_applicant; + goto out_free_group; err = register_netdevice(dev); if (err < 0) - goto out_uninit_applicant; + goto out_free_group; /* Account for reference in struct vlan_dev_info */ dev_hold(real_dev); @@ -283,9 +278,6 @@ int register_vlan_dev(struct net_device *dev) return 0; -out_uninit_applicant: - if (ngrp) - vlan_gvrp_uninit_applicant(real_dev); out_free_group: if (ngrp) vlan_group_free(ngrp); @@ -599,9 +591,9 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; - err = vlan_dev_change_flags(dev, - args.vlan_qos ? 
args.u.flag : 0, - args.u.flag); + err = vlan_dev_set_vlan_flag(dev, + args.u.flag, + args.vlan_qos); break; case SET_VLAN_NAME_TYPE_CMD: @@ -721,20 +713,14 @@ static int __init vlan_proto_init(void) if (err < 0) goto err2; - err = vlan_gvrp_init(); - if (err < 0) - goto err3; - err = vlan_netlink_init(); if (err < 0) - goto err4; + goto err3; dev_add_pack(&vlan_packet_type); vlan_ioctl_set(vlan_ioctl_handler); return 0; -err4: - vlan_gvrp_uninit(); err3: unregister_netdevice_notifier(&vlan_notifier_block); err2: @@ -759,9 +745,8 @@ static void __exit vlan_cleanup_module(void) BUG_ON(!hlist_empty(&vlan_group_hash[i])); unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops); - synchronize_net(); - vlan_gvrp_uninit(); + synchronize_net(); } module_init(vlan_proto_init); diff --git a/trunk/net/8021q/vlan.h b/trunk/net/8021q/vlan.h index 097b2e04c928..5229a72c7ea1 100644 --- a/trunk/net/8021q/vlan.h +++ b/trunk/net/8021q/vlan.h @@ -28,7 +28,8 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev, u32 skb_prio, short vlan_prio); int vlan_dev_set_egress_priority(const struct net_device *dev, u32 skb_prio, short vlan_prio); -int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask); +int vlan_dev_set_vlan_flag(const struct net_device *dev, + u32 flag, short flag_val); void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result); @@ -37,22 +38,6 @@ void vlan_setup(struct net_device *dev); int register_vlan_dev(struct net_device *dev); void unregister_vlan_dev(struct net_device *dev); -#ifdef CONFIG_VLAN_8021Q_GVRP -extern int vlan_gvrp_request_join(const struct net_device *dev); -extern void vlan_gvrp_request_leave(const struct net_device *dev); -extern int vlan_gvrp_init_applicant(struct net_device *dev); -extern void vlan_gvrp_uninit_applicant(struct net_device *dev); -extern int vlan_gvrp_init(void); -extern void vlan_gvrp_uninit(void); -#else -static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; } -static inline void vlan_gvrp_request_leave(const struct net_device *dev) {} -static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; } -static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {} -static inline int vlan_gvrp_init(void) { return 0; } -static inline void vlan_gvrp_uninit(void) {} -#endif - int vlan_netlink_init(void); void vlan_netlink_fini(void); diff --git a/trunk/net/8021q/vlan_dev.c b/trunk/net/8021q/vlan_dev.c index a0617bf7cec6..5d055c242ed8 100644 --- a/trunk/net/8021q/vlan_dev.c +++ b/trunk/net/8021q/vlan_dev.c @@ -507,23 +507,18 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, } /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. 
*/ -int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask) +int vlan_dev_set_vlan_flag(const struct net_device *dev, + u32 flag, short flag_val) { - struct vlan_dev_info *vlan = vlan_dev_info(dev); - u32 old_flags = vlan->flags; - - if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP)) - return -EINVAL; - - vlan->flags = (old_flags & ~mask) | (flags & mask); - - if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) { - if (vlan->flags & VLAN_FLAG_GVRP) - vlan_gvrp_request_join(dev); + /* verify flag is supported */ + if (flag == VLAN_FLAG_REORDER_HDR) { + if (flag_val) + vlan_dev_info(dev)->flags |= VLAN_FLAG_REORDER_HDR; else - vlan_gvrp_request_leave(dev); + vlan_dev_info(dev)->flags &= ~VLAN_FLAG_REORDER_HDR; + return 0; } - return 0; + return -EINVAL; } void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) @@ -557,19 +552,12 @@ static int vlan_dev_open(struct net_device *dev) if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, 1); - if (vlan->flags & VLAN_FLAG_GVRP) - vlan_gvrp_request_join(dev); - return 0; } static int vlan_dev_stop(struct net_device *dev) { - struct vlan_dev_info *vlan = vlan_dev_info(dev); - struct net_device *real_dev = vlan->real_dev; - - if (vlan->flags & VLAN_FLAG_GVRP) - vlan_gvrp_request_leave(dev); + struct net_device *real_dev = vlan_dev_info(dev)->real_dev; dev_mc_unsync(real_dev, dev); dev_unicast_unsync(real_dev, dev); diff --git a/trunk/net/8021q/vlan_gvrp.c b/trunk/net/8021q/vlan_gvrp.c deleted file mode 100644 index db9781608362..000000000000 --- a/trunk/net/8021q/vlan_gvrp.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * IEEE 802.1Q GARP VLAN Registration Protocol (GVRP) - * - * Copyright (c) 2008 Patrick McHardy - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
- */ -#include -#include -#include -#include "vlan.h" - -#define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 } - -enum gvrp_attributes { - GVRP_ATTR_INVALID, - GVRP_ATTR_VID, - __GVRP_ATTR_MAX -}; -#define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1) - -static struct garp_application vlan_gvrp_app __read_mostly = { - .proto.group_address = GARP_GVRP_ADDRESS, - .maxattr = GVRP_ATTR_MAX, - .type = GARP_APPLICATION_GVRP, -}; - -int vlan_gvrp_request_join(const struct net_device *dev) -{ - const struct vlan_dev_info *vlan = vlan_dev_info(dev); - __be16 vid = htons(vlan->vlan_id); - - return garp_request_join(vlan->real_dev, &vlan_gvrp_app, - &vid, sizeof(vid), GVRP_ATTR_VID); -} - -void vlan_gvrp_request_leave(const struct net_device *dev) -{ - const struct vlan_dev_info *vlan = vlan_dev_info(dev); - __be16 vid = htons(vlan->vlan_id); - - garp_request_leave(vlan->real_dev, &vlan_gvrp_app, - &vid, sizeof(vid), GVRP_ATTR_VID); -} - -int vlan_gvrp_init_applicant(struct net_device *dev) -{ - return garp_init_applicant(dev, &vlan_gvrp_app); -} - -void vlan_gvrp_uninit_applicant(struct net_device *dev) -{ - garp_uninit_applicant(dev, &vlan_gvrp_app); -} - -int __init vlan_gvrp_init(void) -{ - return garp_register_application(&vlan_gvrp_app); -} - -void vlan_gvrp_uninit(void) -{ - garp_unregister_application(&vlan_gvrp_app); -} diff --git a/trunk/net/8021q/vlan_netlink.c b/trunk/net/8021q/vlan_netlink.c index e9c91dcecc9b..c93e69ec28ed 100644 --- a/trunk/net/8021q/vlan_netlink.c +++ b/trunk/net/8021q/vlan_netlink.c @@ -59,8 +59,7 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) } if (data[IFLA_VLAN_FLAGS]) { flags = nla_data(data[IFLA_VLAN_FLAGS]); - if ((flags->flags & flags->mask) & - ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP)) + if ((flags->flags & flags->mask) & ~VLAN_FLAG_REORDER_HDR) return -EINVAL; } @@ -76,6 +75,7 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) static int vlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { + struct vlan_dev_info *vlan = vlan_dev_info(dev); struct ifla_vlan_flags *flags; struct ifla_vlan_qos_mapping *m; struct nlattr *attr; @@ -83,7 +83,8 @@ static int vlan_changelink(struct net_device *dev, if (data[IFLA_VLAN_FLAGS]) { flags = nla_data(data[IFLA_VLAN_FLAGS]); - vlan_dev_change_flags(dev, flags->flags, flags->mask); + vlan->flags = (vlan->flags & ~flags->mask) | + (flags->flags & flags->mask); } if (data[IFLA_VLAN_INGRESS_QOS]) { nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { diff --git a/trunk/net/Kconfig b/trunk/net/Kconfig index b98668751749..acbf7c60e89b 100644 --- a/trunk/net/Kconfig +++ b/trunk/net/Kconfig @@ -181,7 +181,6 @@ source "net/dccp/Kconfig" source "net/sctp/Kconfig" source "net/tipc/Kconfig" source "net/atm/Kconfig" -source "net/802/Kconfig" source "net/bridge/Kconfig" source "net/8021q/Kconfig" source "net/decnet/Kconfig" diff --git a/trunk/net/bridge/Kconfig b/trunk/net/bridge/Kconfig index e143ca678881..12265aff7099 100644 --- a/trunk/net/bridge/Kconfig +++ b/trunk/net/bridge/Kconfig @@ -5,7 +5,6 @@ config BRIDGE tristate "802.1d Ethernet Bridging" select LLC - select STP ---help--- If you say Y here, then your Linux box will be able to act as an Ethernet bridge, which means that the different Ethernet segments it diff --git a/trunk/net/bridge/br.c b/trunk/net/bridge/br.c index 573acdf6f9ff..cede010f4ddd 100644 --- a/trunk/net/bridge/br.c +++ b/trunk/net/bridge/br.c @@ -18,24 +18,21 @@ #include #include #include -#include #include "br_private.h" 
int (*br_should_route_hook)(struct sk_buff *skb); -static const struct stp_proto br_stp_proto = { - .rcv = br_stp_rcv, -}; +static struct llc_sap *br_stp_sap; static int __init br_init(void) { int err; - err = stp_proto_register(&br_stp_proto); - if (err < 0) { + br_stp_sap = llc_sap_open(LLC_SAP_BSPAN, br_stp_rcv); + if (!br_stp_sap) { printk(KERN_ERR "bridge: can't register sap for STP\n"); - return err; + return -EADDRINUSE; } err = br_fdb_init(); @@ -68,13 +65,13 @@ static int __init br_init(void) err_out1: br_fdb_fini(); err_out: - stp_proto_unregister(&br_stp_proto); + llc_sap_put(br_stp_sap); return err; } static void __exit br_deinit(void) { - stp_proto_unregister(&br_stp_proto); + rcu_assign_pointer(br_stp_sap->rcv_func, NULL); br_netlink_fini(); unregister_netdevice_notifier(&br_device_notifier); @@ -85,6 +82,7 @@ static void __exit br_deinit(void) synchronize_net(); br_netfilter_fini(); + llc_sap_put(br_stp_sap); br_fdb_get_hook = NULL; br_fdb_put_hook = NULL; diff --git a/trunk/net/bridge/br_private.h b/trunk/net/bridge/br_private.h index 815ed38925b2..8593c9f6a302 100644 --- a/trunk/net/bridge/br_private.h +++ b/trunk/net/bridge/br_private.h @@ -226,9 +226,8 @@ extern void br_stp_set_path_cost(struct net_bridge_port *p, extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); /* br_stp_bpdu.c */ -struct stp_proto; -extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, - struct net_device *dev); +extern int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); /* br_stp_timer.c */ extern void br_stp_timer_init(struct net_bridge *br); diff --git a/trunk/net/bridge/br_stp_bpdu.c b/trunk/net/bridge/br_stp_bpdu.c index 996476174517..9dc2de656965 100644 --- a/trunk/net/bridge/br_stp_bpdu.c +++ b/trunk/net/bridge/br_stp_bpdu.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include "br_private.h" @@ -132,9 +131,10 @@ void br_send_tcn_bpdu(struct net_bridge_port *p) * * NO locks, but rcu_read_lock (preempt_disabled) */ -void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, - struct net_device *dev) +int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) { + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); const unsigned char *dest = eth_hdr(skb)->h_dest; struct net_bridge_port *p = rcu_dereference(dev->br_port); struct net_bridge *br; @@ -146,6 +146,11 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, if (!p) goto err; + if (pdu->ssap != LLC_SAP_BSPAN + || pdu->dsap != LLC_SAP_BSPAN + || pdu->ctrl_1 != LLC_PDU_TYPE_U) + goto err; + if (!pskb_may_pull(skb, 4)) goto err; @@ -219,4 +224,5 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, spin_unlock(&br->lock); err: kfree_skb(skb); + return 0; } diff --git a/trunk/net/core/ethtool.c b/trunk/net/core/ethtool.c index 14ada537f895..0133b5ebd545 100644 --- a/trunk/net/core/ethtool.c +++ b/trunk/net/core/ethtool.c @@ -209,36 +209,6 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) return 0; } -static int ethtool_set_rxhash(struct net_device *dev, void __user *useraddr) -{ - struct ethtool_rxnfc cmd; - - if (!dev->ethtool_ops->set_rxhash) - return -EOPNOTSUPP; - - if (copy_from_user(&cmd, useraddr, sizeof(cmd))) - return -EFAULT; - - return dev->ethtool_ops->set_rxhash(dev, &cmd); -} - -static int ethtool_get_rxhash(struct net_device *dev, void __user *useraddr) -{ - struct 
ethtool_rxnfc info; - - if (!dev->ethtool_ops->get_rxhash) - return -EOPNOTSUPP; - - if (copy_from_user(&info, useraddr, sizeof(info))) - return -EFAULT; - - dev->ethtool_ops->get_rxhash(dev, &info); - - if (copy_to_user(useraddr, &info, sizeof(info))) - return -EFAULT; - return 0; -} - static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) { struct ethtool_regs regs; @@ -856,7 +826,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GGSO: case ETHTOOL_GFLAGS: case ETHTOOL_GPFLAGS: - case ETHTOOL_GRXFH: break; default: if (!capable(CAP_NET_ADMIN)) @@ -1008,12 +977,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) rc = ethtool_set_value(dev, useraddr, dev->ethtool_ops->set_priv_flags); break; - case ETHTOOL_GRXFH: - rc = ethtool_get_rxhash(dev, useraddr); - break; - case ETHTOOL_SRXFH: - rc = ethtool_set_rxhash(dev, useraddr); - break; default: rc = -EOPNOTSUPP; } diff --git a/trunk/net/core/fib_rules.c b/trunk/net/core/fib_rules.c index 1c2943a119f3..e3e9ab0f74e3 100644 --- a/trunk/net/core/fib_rules.c +++ b/trunk/net/core/fib_rules.c @@ -69,7 +69,7 @@ static void rules_ops_put(struct fib_rules_ops *ops) static void flush_route_cache(struct fib_rules_ops *ops) { if (ops->flush_cache) - ops->flush_cache(ops); + ops->flush_cache(); } int fib_rules_register(struct fib_rules_ops *ops) diff --git a/trunk/net/decnet/dn_rules.c b/trunk/net/decnet/dn_rules.c index 14fbca55e908..5b7539b7fe0c 100644 --- a/trunk/net/decnet/dn_rules.c +++ b/trunk/net/decnet/dn_rules.c @@ -229,7 +229,7 @@ static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops) return 0; } -static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) +static void dn_fib_rule_flush_cache(void) { dn_rt_cache_flush(-1); } diff --git a/trunk/net/ipv4/af_inet.c b/trunk/net/ipv4/af_inet.c index dc411335c14f..42bd24b64b57 100644 --- a/trunk/net/ipv4/af_inet.c +++ b/trunk/net/ipv4/af_inet.c @@ -1479,15 +1479,14 @@ static int __init inet_init(void) * Initialise the multicast router */ #if defined(CONFIG_IP_MROUTE) - if (ip_mr_init()) - printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n"); + ip_mr_init(); #endif /* * Initialise per-cpu ipv4 mibs */ if (init_ipv4_mibs()) - printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); + printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ; ipv4_proc_init(); diff --git a/trunk/net/ipv4/arp.c b/trunk/net/ipv4/arp.c index 29df75a6bcc7..20c515a1be28 100644 --- a/trunk/net/ipv4/arp.c +++ b/trunk/net/ipv4/arp.c @@ -1197,7 +1197,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo switch (event) { case NETDEV_CHANGEADDR: neigh_changeaddr(&arp_tbl, dev); - rt_cache_flush(dev_net(dev), 0); + rt_cache_flush(0); break; default: break; diff --git a/trunk/net/ipv4/devinet.c b/trunk/net/ipv4/devinet.c index 2e667e2f90df..9de2514946ca 100644 --- a/trunk/net/ipv4/devinet.c +++ b/trunk/net/ipv4/devinet.c @@ -1348,7 +1348,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, dev_disable_lro(idev->dev); } rtnl_unlock(); - rt_cache_flush(net, 0); + rt_cache_flush(0); } } @@ -1362,10 +1362,9 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write, int *valp = ctl->data; int val = *valp; int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); - struct net *net = ctl->extra2; if (write && *valp != val) - rt_cache_flush(net, 0); + rt_cache_flush(0); return ret; } @@ -1376,10 +1375,9 @@ int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen, { int ret = devinet_conf_sysctl(table, name, nlen, 
oldval, oldlenp, newval, newlen); - struct net *net = table->extra2; if (ret == 1) - rt_cache_flush(net, 0); + rt_cache_flush(0); return ret; } diff --git a/trunk/net/ipv4/fib_frontend.c b/trunk/net/ipv4/fib_frontend.c index 65c1503f8cc8..5ad01d63f83b 100644 --- a/trunk/net/ipv4/fib_frontend.c +++ b/trunk/net/ipv4/fib_frontend.c @@ -144,7 +144,7 @@ static void fib_flush(struct net *net) } if (flushed) - rt_cache_flush(net, -1); + rt_cache_flush(-1); } /* @@ -897,22 +897,21 @@ static void fib_disable_ip(struct net_device *dev, int force) { if (fib_sync_down_dev(dev, force)) fib_flush(dev_net(dev)); - rt_cache_flush(dev_net(dev), 0); + rt_cache_flush(0); arp_ifdown(dev); } static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_ifaddr *ifa = (struct in_ifaddr*)ptr; - struct net_device *dev = ifa->ifa_dev->dev; switch (event) { case NETDEV_UP: fib_add_ifaddr(ifa); #ifdef CONFIG_IP_ROUTE_MULTIPATH - fib_sync_up(dev); + fib_sync_up(ifa->ifa_dev->dev); #endif - rt_cache_flush(dev_net(dev), -1); + rt_cache_flush(-1); break; case NETDEV_DOWN: fib_del_ifaddr(ifa); @@ -920,9 +919,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, /* Last address was deleted from this interface. Disable IP. */ - fib_disable_ip(dev, 1); + fib_disable_ip(ifa->ifa_dev->dev, 1); } else { - rt_cache_flush(dev_net(dev), -1); + rt_cache_flush(-1); } break; } @@ -950,14 +949,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo #ifdef CONFIG_IP_ROUTE_MULTIPATH fib_sync_up(dev); #endif - rt_cache_flush(dev_net(dev), -1); + rt_cache_flush(-1); break; case NETDEV_DOWN: fib_disable_ip(dev, 0); break; case NETDEV_CHANGEMTU: case NETDEV_CHANGE: - rt_cache_flush(dev_net(dev), 0); + rt_cache_flush(0); break; } return NOTIFY_DONE; diff --git a/trunk/net/ipv4/fib_hash.c b/trunk/net/ipv4/fib_hash.c index c8cac6c7f881..eeec4bf982b8 100644 --- a/trunk/net/ipv4/fib_hash.c +++ b/trunk/net/ipv4/fib_hash.c @@ -472,7 +472,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) fib_release_info(fi_drop); if (state & FA_S_ACCESSED) - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(-1); rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); return 0; @@ -532,7 +532,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) if (new_f) fz->fz_nent++; - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(-1); rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id, &cfg->fc_nlinfo, 0); @@ -614,7 +614,7 @@ static int fn_hash_delete(struct fib_table *tb, struct fib_config *cfg) write_unlock_bh(&fib_hash_lock); if (fa->fa_state & FA_S_ACCESSED) - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(-1); fn_free_alias(fa, f); if (kill_fn) { fn_free_node(f); diff --git a/trunk/net/ipv4/fib_rules.c b/trunk/net/ipv4/fib_rules.c index 6080d7120821..1fb56876be54 100644 --- a/trunk/net/ipv4/fib_rules.c +++ b/trunk/net/ipv4/fib_rules.c @@ -258,9 +258,9 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) + nla_total_size(4); /* flow */ } -static void fib4_rule_flush_cache(struct fib_rules_ops *ops) +static void fib4_rule_flush_cache(void) { - rt_cache_flush(ops->fro_net, -1); + rt_cache_flush(-1); } static struct fib_rules_ops fib4_rules_ops_template = { diff --git a/trunk/net/ipv4/fib_trie.c b/trunk/net/ipv4/fib_trie.c index d16ae4623be6..394db9c941a1 100644 --- a/trunk/net/ipv4/fib_trie.c +++ b/trunk/net/ipv4/fib_trie.c @@ 
-1271,7 +1271,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) fib_release_info(fi_drop); if (state & FA_S_ACCESSED) - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(-1); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); @@ -1316,7 +1316,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) list_add_tail_rcu(&new_fa->fa_list, (fa ? &fa->fa_list : fa_head)); - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(-1); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, 0); succeeded: @@ -1664,7 +1664,7 @@ static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg) trie_leaf_remove(t, l); if (fa->fa_state & FA_S_ACCESSED) - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(-1); fib_release_info(fa->fa_info); alias_free_mem_rcu(fa); diff --git a/trunk/net/ipv4/ipmr.c b/trunk/net/ipv4/ipmr.c index 438fab9c62a0..300ab0c2919e 100644 --- a/trunk/net/ipv4/ipmr.c +++ b/trunk/net/ipv4/ipmr.c @@ -1878,36 +1878,16 @@ static struct net_protocol pim_protocol = { * Setup for IP multicast routing */ -int __init ip_mr_init(void) +void __init ip_mr_init(void) { - int err; - mrt_cachep = kmem_cache_create("ip_mrt_cache", sizeof(struct mfc_cache), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - if (!mrt_cachep) - return -ENOMEM; - setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); - err = register_netdevice_notifier(&ip_mr_notifier); - if (err) - goto reg_notif_fail; + register_netdevice_notifier(&ip_mr_notifier); #ifdef CONFIG_PROC_FS - err = -ENOMEM; - if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops)) - goto proc_vif_fail; - if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops)) - goto proc_cache_fail; + proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops); + proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops); #endif - return 0; -reg_notif_fail: - kmem_cache_destroy(mrt_cachep); -#ifdef CONFIG_PROC_FS -proc_vif_fail: - unregister_netdevice_notifier(&ip_mr_notifier); -proc_cache_fail: - proc_net_remove(&init_net, "ip_mr_vif"); -#endif - return err; } diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index 113cd2512ba7..fe3a02237286 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -132,6 +132,7 @@ static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ; static void rt_worker_func(struct work_struct *work); static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); +static struct timer_list rt_secret_timer; /* * Interface to generic destination cache. 
@@ -250,25 +251,20 @@ static inline void rt_hash_lock_init(void) static struct rt_hash_bucket *rt_hash_table __read_mostly; static unsigned rt_hash_mask __read_mostly; static unsigned int rt_hash_log __read_mostly; +static atomic_t rt_genid __read_mostly; static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) \ (__raw_get_cpu_var(rt_cache_stat).field++) -static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, - int genid) +static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx) { return jhash_3words((__force u32)(__be32)(daddr), (__force u32)(__be32)(saddr), - idx, genid) + idx, atomic_read(&rt_genid)) & rt_hash_mask; } -static inline int rt_genid(struct net *net) -{ - return atomic_read(&net->ipv4.rt_genid); -} - #ifdef CONFIG_PROC_FS struct rt_cache_iter_state { struct seq_net_private p; @@ -338,7 +334,7 @@ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) struct rt_cache_iter_state *st = seq->private; if (*pos) return rt_cache_get_idx(seq, *pos - 1); - st->genid = rt_genid(seq_file_net(seq)); + st->genid = atomic_read(&rt_genid); return SEQ_START_TOKEN; } @@ -685,11 +681,6 @@ static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev); } -static inline int rt_is_expired(struct rtable *rth) -{ - return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev)); -} - /* * Perform a full scan of hash table and free all entries. * Can be called by a softirq or a process. @@ -699,7 +690,6 @@ static void rt_do_flush(int process_context) { unsigned int i; struct rtable *rth, *next; - struct rtable * tail; for (i = 0; i <= rt_hash_mask; i++) { if (process_context && need_resched()) @@ -709,39 +699,11 @@ static void rt_do_flush(int process_context) continue; spin_lock_bh(rt_hash_lock_addr(i)); -#ifdef CONFIG_NET_NS - { - struct rtable ** prev, * p; - - rth = rt_hash_table[i].chain; - - /* defer releasing the head of the list after spin_unlock */ - for (tail = rth; tail; tail = tail->u.dst.rt_next) - if (!rt_is_expired(tail)) - break; - if (rth != tail) - rt_hash_table[i].chain = tail; - - /* call rt_free on entries after the tail requiring flush */ - prev = &rt_hash_table[i].chain; - for (p = *prev; p; p = next) { - next = p->u.dst.rt_next; - if (!rt_is_expired(p)) { - prev = &p->u.dst.rt_next; - } else { - *prev = next; - rt_free(p); - } - } - } -#else rth = rt_hash_table[i].chain; rt_hash_table[i].chain = NULL; - tail = NULL; -#endif spin_unlock_bh(rt_hash_lock_addr(i)); - for (; rth != tail; rth = next) { + for (; rth; rth = next) { next = rth->u.dst.rt_next; rt_free(rth); } @@ -774,7 +736,7 @@ static void rt_check_expire(void) continue; spin_lock_bh(rt_hash_lock_addr(i)); while ((rth = *rthp) != NULL) { - if (rt_is_expired(rth)) { + if (rth->rt_genid != atomic_read(&rt_genid)) { *rthp = rth->u.dst.rt_next; rt_free(rth); continue; @@ -817,21 +779,21 @@ static void rt_worker_func(struct work_struct *work) * many times (2^24) without giving recent rt_genid. * Jenkins hash is strong enough that litle changes of rt_genid are OK. 
*/ -static void rt_cache_invalidate(struct net *net) +static void rt_cache_invalidate(void) { unsigned char shuffle; get_random_bytes(&shuffle, sizeof(shuffle)); - atomic_add(shuffle + 1U, &net->ipv4.rt_genid); + atomic_add(shuffle + 1U, &rt_genid); } /* * delay < 0 : invalidate cache (fast : entries will be deleted later) * delay >= 0 : invalidate & flush cache (can be long) */ -void rt_cache_flush(struct net *net, int delay) +void rt_cache_flush(int delay) { - rt_cache_invalidate(net); + rt_cache_invalidate(); if (delay >= 0) rt_do_flush(!in_softirq()); } @@ -839,11 +801,10 @@ void rt_cache_flush(struct net *net, int delay) /* * We change rt_genid and let gc do the cleanup */ -static void rt_secret_rebuild(unsigned long __net) +static void rt_secret_rebuild(unsigned long dummy) { - struct net *net = (struct net *)__net; - rt_cache_invalidate(net); - mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval); + rt_cache_invalidate(); + mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval); } /* @@ -919,7 +880,7 @@ static int rt_garbage_collect(struct dst_ops *ops) rthp = &rt_hash_table[k].chain; spin_lock_bh(rt_hash_lock_addr(k)); while ((rth = *rthp) != NULL) { - if (!rt_is_expired(rth) && + if (rth->rt_genid == atomic_read(&rt_genid) && !rt_may_expire(rth, tmo, expire)) { tmo >>= 1; rthp = &rth->u.dst.rt_next; @@ -1001,7 +962,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) spin_lock_bh(rt_hash_lock_addr(hash)); while ((rth = *rthp) != NULL) { - if (rt_is_expired(rth)) { + if (rth->rt_genid != atomic_read(&rt_genid)) { *rthp = rth->u.dst.rt_next; rt_free(rth); continue; @@ -1177,7 +1138,7 @@ static void rt_del(unsigned hash, struct rtable *rt) spin_lock_bh(rt_hash_lock_addr(hash)); ip_rt_put(rt); while ((aux = *rthp) != NULL) { - if (aux == rt || rt_is_expired(aux)) { + if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) { *rthp = aux->u.dst.rt_next; rt_free(aux); continue; @@ -1219,8 +1180,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, for (i = 0; i < 2; i++) { for (k = 0; k < 2; k++) { - unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], - rt_genid(net)); + unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); rthp=&rt_hash_table[hash].chain; @@ -1232,7 +1192,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, rth->fl.fl4_src != skeys[i] || rth->fl.oif != ikeys[k] || rth->fl.iif != 0 || - rt_is_expired(rth) || + rth->rt_genid != atomic_read(&rt_genid) || !net_eq(dev_net(rth->u.dst.dev), net)) { rthp = &rth->u.dst.rt_next; continue; @@ -1271,7 +1231,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, rt->u.dst.neighbour = NULL; rt->u.dst.hh = NULL; rt->u.dst.xfrm = NULL; - rt->rt_genid = rt_genid(net); + rt->rt_genid = atomic_read(&rt_genid); rt->rt_flags |= RTCF_REDIRECTED; /* Gateway is different ... 
*/ @@ -1335,8 +1295,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) } else if ((rt->rt_flags & RTCF_REDIRECTED) || rt->u.dst.expires) { unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, - rt->fl.oif, - rt_genid(dev_net(dst->dev))); + rt->fl.oif); #if RT_CACHE_DEBUG >= 1 printk(KERN_DEBUG "ipv4_negative_advice: redirect to " NIPQUAD_FMT "/%02x dropped\n", @@ -1485,8 +1444,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, for (k = 0; k < 2; k++) { for (i = 0; i < 2; i++) { - unsigned hash = rt_hash(daddr, skeys[i], ikeys[k], - rt_genid(net)); + unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]); rcu_read_lock(); for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; @@ -1501,7 +1459,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, rth->fl.iif != 0 || dst_metric_locked(&rth->u.dst, RTAX_MTU) || !net_eq(dev_net(rth->u.dst.dev), net) || - !rt_is_expired(rth)) + rth->rt_genid != atomic_read(&rt_genid)) continue; if (new_mtu < 68 || new_mtu >= old_mtu) { @@ -1736,7 +1694,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->fl.oif = 0; rth->rt_gateway = daddr; rth->rt_spec_dst= spec_dst; - rth->rt_genid = rt_genid(dev_net(dev)); + rth->rt_genid = atomic_read(&rt_genid); rth->rt_flags = RTCF_MULTICAST; rth->rt_type = RTN_MULTICAST; if (our) { @@ -1751,7 +1709,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, RT_CACHE_STAT_INC(in_slow_mc); in_dev_put(in_dev); - hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); + hash = rt_hash(daddr, saddr, dev->ifindex); return rt_intern_hash(hash, rth, &skb->rtable); e_nobufs: @@ -1877,7 +1835,7 @@ static int __mkroute_input(struct sk_buff *skb, rth->u.dst.input = ip_forward; rth->u.dst.output = ip_output; - rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); + rth->rt_genid = atomic_read(&rt_genid); rt_set_nexthop(rth, res, itag); @@ -1912,8 +1870,7 @@ static int ip_mkroute_input(struct sk_buff *skb, return err; /* put it into the cache */ - hash = rt_hash(daddr, saddr, fl->iif, - rt_genid(dev_net(rth->u.dst.dev))); + hash = rt_hash(daddr, saddr, fl->iif); return rt_intern_hash(hash, rth, &skb->rtable); } @@ -2039,7 +1996,7 @@ out: return err; goto e_nobufs; rth->u.dst.output= ip_rt_bug; - rth->rt_genid = rt_genid(net); + rth->rt_genid = atomic_read(&rt_genid); atomic_set(&rth->u.dst.__refcnt, 1); rth->u.dst.flags= DST_HOST; @@ -2069,7 +2026,7 @@ out: return err; rth->rt_flags &= ~RTCF_LOCAL; } rth->rt_type = res.type; - hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); + hash = rt_hash(daddr, saddr, fl.iif); err = rt_intern_hash(hash, rth, &skb->rtable); goto done; @@ -2120,7 +2077,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, net = dev_net(dev); tos &= IPTOS_RT_MASK; - hash = rt_hash(daddr, saddr, iif, rt_genid(net)); + hash = rt_hash(daddr, saddr, iif); rcu_read_lock(); for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; @@ -2132,7 +2089,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, (rth->fl.fl4_tos ^ tos)) == 0 && rth->fl.mark == skb->mark && net_eq(dev_net(rth->u.dst.dev), net) && - !rt_is_expired(rth)) { + rth->rt_genid == atomic_read(&rt_genid)) { dst_use(&rth->u.dst, jiffies); RT_CACHE_STAT_INC(in_hit); rcu_read_unlock(); @@ -2260,7 +2217,7 @@ static int __mkroute_output(struct rtable **result, rth->rt_spec_dst= fl->fl4_src; rth->u.dst.output=ip_output; - rth->rt_genid = rt_genid(dev_net(dev_out)); + rth->rt_genid = 
atomic_read(&rt_genid); RT_CACHE_STAT_INC(out_slow_tot); @@ -2309,8 +2266,7 @@ static int ip_mkroute_output(struct rtable **rp, int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); unsigned hash; if (err == 0) { - hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, - rt_genid(dev_net(dev_out))); + hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif); err = rt_intern_hash(hash, rth, rp); } @@ -2522,7 +2478,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, unsigned hash; struct rtable *rth; - hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); + hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif); rcu_read_lock_bh(); for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; @@ -2535,7 +2491,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, !((rth->fl.fl4_tos ^ flp->fl4_tos) & (IPTOS_RT_MASK | RTO_ONLINK)) && net_eq(dev_net(rth->u.dst.dev), net) && - !rt_is_expired(rth)) { + rth->rt_genid == atomic_read(&rt_genid)) { dst_use(&rth->u.dst, jiffies); RT_CACHE_STAT_INC(out_hit); rcu_read_unlock_bh(); @@ -2566,7 +2522,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { }; -static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp) +static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp) { struct rtable *ort = *rp; struct rtable *rt = (struct rtable *) @@ -2590,7 +2546,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi rt->idev = ort->idev; if (rt->idev) in_dev_hold(rt->idev); - rt->rt_genid = rt_genid(net); + rt->rt_genid = atomic_read(&rt_genid); rt->rt_flags = ort->rt_flags; rt->rt_type = ort->rt_type; rt->rt_dst = ort->rt_dst; @@ -2626,7 +2582,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp, err = __xfrm_lookup((struct dst_entry **)rp, flp, sk, flags ? 
XFRM_LOOKUP_WAIT : 0); if (err == -EREMOTE) - err = ipv4_dst_blackhole(net, rp, flp); + err = ipv4_dst_blackhole(rp, flp); return err; } @@ -2845,7 +2801,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) rt = rcu_dereference(rt->u.dst.rt_next), idx++) { if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) continue; - if (rt_is_expired(rt)) + if (rt->rt_genid != atomic_read(&rt_genid)) continue; skb->dst = dst_clone(&rt->u.dst); if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, @@ -2869,27 +2825,19 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) void ip_rt_multicast_event(struct in_device *in_dev) { - rt_cache_flush(dev_net(in_dev->dev), 0); + rt_cache_flush(0); } #ifdef CONFIG_SYSCTL +static int flush_delay; + static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write) { - int flush_delay; - struct net *net; - static DEFINE_MUTEX(flush_mutex); - - mutex_lock(&flush_mutex); - ctl->data = &flush_delay; proc_dointvec(ctl, write, filp, buffer, lenp, ppos); - ctl->data = NULL; - mutex_unlock(&flush_mutex); - - net = (struct net *)ctl->extra1; - rt_cache_flush(net, flush_delay); + rt_cache_flush(flush_delay); return 0; } @@ -2905,17 +2853,24 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table, size_t newlen) { int delay; - struct net *net; if (newlen != sizeof(int)) return -EINVAL; if (get_user(delay, (int __user *)newval)) return -EFAULT; - net = (struct net *)table->extra1; - rt_cache_flush(net, delay); + rt_cache_flush(delay); return 0; } ctl_table ipv4_route_table[] = { + { + .ctl_name = NET_IPV4_ROUTE_FLUSH, + .procname = "flush", + .data = &flush_delay, + .maxlen = sizeof(int), + .mode = 0200, + .proc_handler = &ipv4_sysctl_rtcache_flush, + .strategy = &ipv4_sysctl_rtcache_flush_strategy, + }, { .ctl_name = NET_IPV4_ROUTE_GC_THRESH, .procname = "gc_thresh", @@ -3054,97 +3009,8 @@ ctl_table ipv4_route_table[] = { }, { .ctl_name = 0 } }; - -static __net_initdata struct ctl_path ipv4_route_path[] = { - { .procname = "net", .ctl_name = CTL_NET, }, - { .procname = "ipv4", .ctl_name = NET_IPV4, }, - { .procname = "route", .ctl_name = NET_IPV4_ROUTE, }, - { }, -}; - - -static struct ctl_table ipv4_route_flush_table[] = { - { - .ctl_name = NET_IPV4_ROUTE_FLUSH, - .procname = "flush", - .maxlen = sizeof(int), - .mode = 0200, - .proc_handler = &ipv4_sysctl_rtcache_flush, - .strategy = &ipv4_sysctl_rtcache_flush_strategy, - }, - { .ctl_name = 0 }, -}; - -static __net_init int sysctl_route_net_init(struct net *net) -{ - struct ctl_table *tbl; - - tbl = ipv4_route_flush_table; - if (net != &init_net) { - tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); - if (tbl == NULL) - goto err_dup; - } - tbl[0].extra1 = net; - - net->ipv4.route_hdr = - register_net_sysctl_table(net, ipv4_route_path, tbl); - if (net->ipv4.route_hdr == NULL) - goto err_reg; - return 0; - -err_reg: - if (tbl != ipv4_route_flush_table) - kfree(tbl); -err_dup: - return -ENOMEM; -} - -static __net_exit void sysctl_route_net_exit(struct net *net) -{ - struct ctl_table *tbl; - - tbl = net->ipv4.route_hdr->ctl_table_arg; - unregister_net_sysctl_table(net->ipv4.route_hdr); - BUG_ON(tbl == ipv4_route_flush_table); - kfree(tbl); -} - -static __net_initdata struct pernet_operations sysctl_route_ops = { - .init = sysctl_route_net_init, - .exit = sysctl_route_net_exit, -}; #endif - -static __net_init int rt_secret_timer_init(struct net *net) -{ - atomic_set(&net->ipv4.rt_genid, - (int) 
((num_physpages ^ (num_physpages>>8)) ^ - (jiffies ^ (jiffies >> 7)))); - - net->ipv4.rt_secret_timer.function = rt_secret_rebuild; - net->ipv4.rt_secret_timer.data = (unsigned long)net; - init_timer_deferrable(&net->ipv4.rt_secret_timer); - - net->ipv4.rt_secret_timer.expires = - jiffies + net_random() % ip_rt_secret_interval + - ip_rt_secret_interval; - add_timer(&net->ipv4.rt_secret_timer); - return 0; -} - -static __net_exit void rt_secret_timer_exit(struct net *net) -{ - del_timer_sync(&net->ipv4.rt_secret_timer); -} - -static __net_initdata struct pernet_operations rt_secret_timer_ops = { - .init = rt_secret_timer_init, - .exit = rt_secret_timer_exit, -}; - - #ifdef CONFIG_NET_CLS_ROUTE struct ip_rt_acct *ip_rt_acct __read_mostly; #endif /* CONFIG_NET_CLS_ROUTE */ @@ -3163,6 +3029,9 @@ int __init ip_rt_init(void) { int rc = 0; + atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^ + (jiffies ^ (jiffies >> 7)))); + #ifdef CONFIG_NET_CLS_ROUTE ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); if (!ip_rt_acct) @@ -3194,14 +3063,19 @@ int __init ip_rt_init(void) devinet_init(); ip_fib_init(); + rt_secret_timer.function = rt_secret_rebuild; + rt_secret_timer.data = 0; + init_timer_deferrable(&rt_secret_timer); + /* All the timers, started at system startup tend to synchronize. Perturb it a bit. */ schedule_delayed_work(&expires_work, net_random() % ip_rt_gc_interval + ip_rt_gc_interval); - if (register_pernet_subsys(&rt_secret_timer_ops)) - printk(KERN_ERR "Unable to setup rt_secret_timer\n"); + rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval + + ip_rt_secret_interval; + add_timer(&rt_secret_timer); if (ip_rt_proc_init()) printk(KERN_ERR "Unable to create route proc files\n"); @@ -3211,9 +3085,6 @@ int __init ip_rt_init(void) #endif rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); -#ifdef CONFIG_SYSCTL - register_pernet_subsys(&sysctl_route_ops); -#endif return rc; } diff --git a/trunk/net/ipv4/sysctl_net_ipv4.c b/trunk/net/ipv4/sysctl_net_ipv4.c index 14ef202a2254..901607003205 100644 --- a/trunk/net/ipv4/sysctl_net_ipv4.c +++ b/trunk/net/ipv4/sysctl_net_ipv4.c @@ -793,8 +793,7 @@ static struct ctl_table ipv4_net_table[] = { .data = &init_net.ipv4.sysctl_icmp_ratelimit, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_ms_jiffies, - .strategy = &sysctl_ms_jiffies + .proc_handler = &proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_RATEMASK, diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index d6ea970a1513..de30e70ff256 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -947,21 +947,17 @@ static void tcp_update_reordering(struct sock *sk, const int metric, { struct tcp_sock *tp = tcp_sk(sk); if (metric > tp->reordering) { - int mib_idx; - tp->reordering = min(TCP_MAX_REORDERING, metric); /* This exciting event is worth to be remembered. 
8) */ if (ts) - mib_idx = LINUX_MIB_TCPTSREORDER; + NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER); else if (tcp_is_reno(tp)) - mib_idx = LINUX_MIB_TCPRENOREORDER; + NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER); else if (tcp_is_fack(tp)) - mib_idx = LINUX_MIB_TCPFACKREORDER; + NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER); else - mib_idx = LINUX_MIB_TCPSACKREORDER; - - NET_INC_STATS_BH(mib_idx); + NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER); #if FASTRETRANS_DEBUG > 1 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, @@ -1460,22 +1456,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, if (!tcp_is_sackblock_valid(tp, dup_sack, sp[used_sacks].start_seq, sp[used_sacks].end_seq)) { - int mib_idx; - if (dup_sack) { if (!tp->undo_marker) - mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO); else - mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD); } else { /* Don't count olds caused by ACK reordering */ if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && !after(sp[used_sacks].end_seq, tp->snd_una)) continue; - mib_idx = LINUX_MIB_TCPSACKDISCARD; + NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD); } - - NET_INC_STATS_BH(mib_idx); if (i == 0) first_sack_index = -1; continue; @@ -2388,19 +2380,15 @@ static int tcp_try_undo_recovery(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { - int mib_idx; - /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwr(sk, 1); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) - mib_idx = LINUX_MIB_TCPLOSSUNDO; + NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); else - mib_idx = LINUX_MIB_TCPFULLUNDO; - - NET_INC_STATS_BH(mib_idx); + NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); tp->undo_marker = 0; } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { @@ -2572,7 +2560,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); - int fast_rexmit = 0, mib_idx; + int fast_rexmit = 0; if (WARN_ON(!tp->packets_out && tp->sacked_out)) tp->sacked_out = 0; @@ -2695,11 +2683,9 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) /* Otherwise enter Recovery state */ if (tcp_is_reno(tp)) - mib_idx = LINUX_MIB_TCPRENORECOVERY; + NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY); else - mib_idx = LINUX_MIB_TCPSACKRECOVERY; - - NET_INC_STATS_BH(mib_idx); + NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY); tp->high_seq = tp->snd_nxt; tp->prior_ssthresh = 0; @@ -3714,14 +3700,10 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) { if (tcp_is_sack(tp) && sysctl_tcp_dsack) { - int mib_idx; - if (before(seq, tp->rcv_nxt)) - mib_idx = LINUX_MIB_TCPDSACKOLDSENT; + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); else - mib_idx = LINUX_MIB_TCPDSACKOFOSENT; - - NET_INC_STATS_BH(mib_idx); + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT); tp->rx_opt.dsack = 1; tp->duplicate_sack[0].start_seq = seq; diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c index edef2afe905e..8f83ab432705 100644 --- a/trunk/net/ipv4/tcp_output.c +++ b/trunk/net/ipv4/tcp_output.c @@ -1985,17 +1985,14 @@ void tcp_xmit_retransmit_queue(struct sock 
*sk) if (sacked & TCPCB_LOST) { if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { - int mib_idx; - if (tcp_retransmit_skb(sk, skb)) { tp->retransmit_skb_hint = NULL; return; } if (icsk->icsk_ca_state != TCP_CA_Loss) - mib_idx = LINUX_MIB_TCPFASTRETRANS; + NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); else - mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; - NET_INC_STATS_BH(mib_idx); + NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); if (skb == tcp_write_queue_head(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, diff --git a/trunk/net/ipv4/tcp_timer.c b/trunk/net/ipv4/tcp_timer.c index 6a480d1fd8f6..3e358cbb1247 100644 --- a/trunk/net/ipv4/tcp_timer.c +++ b/trunk/net/ipv4/tcp_timer.c @@ -326,27 +326,24 @@ static void tcp_retransmit_timer(struct sock *sk) goto out; if (icsk->icsk_retransmits == 0) { - int mib_idx; - if (icsk->icsk_ca_state == TCP_CA_Disorder || icsk->icsk_ca_state == TCP_CA_Recovery) { if (tcp_is_sack(tp)) { if (icsk->icsk_ca_state == TCP_CA_Recovery) - mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL; + NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); else - mib_idx = LINUX_MIB_TCPSACKFAILURES; + NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); } else { if (icsk->icsk_ca_state == TCP_CA_Recovery) - mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL; + NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); else - mib_idx = LINUX_MIB_TCPRENOFAILURES; + NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); } } else if (icsk->icsk_ca_state == TCP_CA_Loss) { - mib_idx = LINUX_MIB_TCPLOSSFAILURES; + NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); } else { - mib_idx = LINUX_MIB_TCPTIMEOUTS; + NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); } - NET_INC_STATS_BH(mib_idx); } if (tcp_use_frto(sk)) { diff --git a/trunk/net/ipv4/udp.c b/trunk/net/ipv4/udp.c index 7187121e922d..3bbf6fb6e4f5 100644 --- a/trunk/net/ipv4/udp.c +++ b/trunk/net/ipv4/udp.c @@ -526,8 +526,7 @@ static int udp_push_pending_frames(struct sock *sk) up->len = 0; up->pending = 0; if (!err) - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_OUTDATAGRAMS, is_udplite); + UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } @@ -726,8 +725,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, * seems like overkill. 
*/ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_SNDBUFERRORS, is_udplite); + UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); } return err; @@ -890,8 +888,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, goto out_free; if (!peeked) - UDP_INC_STATS_USER(sock_net(sk), - UDP_MIB_INDATAGRAMS, is_udplite); + UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_timestamp(msg, sk, skb); @@ -920,7 +917,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, csum_copy_err: lock_sock(sk); if (!skb_kill_datagram(sk, skb, flags)) - UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); release_sock(sk); if (noblock) @@ -991,8 +988,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) ret = (*up->encap_rcv)(sk, skb); if (ret <= 0) { - UDP_INC_STATS_BH(sock_net(sk), - UDP_MIB_INDATAGRAMS, + UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, is_udplite); return -ret; } @@ -1045,8 +1041,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) { - UDP_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, is_udplite); + UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); atomic_inc(&sk->sk_drops); } goto drop; @@ -1055,7 +1050,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) return 0; drop: - UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); return -1; } @@ -1163,7 +1158,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], struct rtable *rt = (struct rtable*)skb->dst; __be32 saddr = ip_hdr(skb)->saddr; __be32 daddr = ip_hdr(skb)->daddr; - struct net *net = dev_net(skb->dev); + struct net *net; /* * Validate the packet. 
@@ -1185,6 +1180,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], if (udp4_csum_init(skb, uh, proto)) goto csum_error; + net = dev_net(skb->dev); if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) return __udp4_lib_mcast_deliver(net, skb, uh, saddr, daddr, udptable); @@ -1218,7 +1214,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], if (udp_lib_checksum_complete(skb)) goto csum_error; - UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); + UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* @@ -1252,7 +1248,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], ntohs(uh->dest), ulen); drop: - UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); + UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } @@ -1459,8 +1455,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL && udp_lib_checksum_complete(skb)) { - UDP_INC_STATS_BH(sock_net(sk), - UDP_MIB_INERRORS, is_lite); + UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); __skb_unlink(skb, rcvq); kfree_skb(skb); } diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index 2ec73e62202c..84127d854cfc 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -119,7 +119,6 @@ static void ipv6_regen_rndid(unsigned long data); static int desync_factor = MAX_DESYNC_FACTOR * HZ; #endif -static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); static int ipv6_count_addresses(struct inet6_dev *idev); /* @@ -184,8 +183,6 @@ struct ipv6_devconf ipv6_devconf __read_mostly = { #endif .proxy_ndp = 0, .accept_source_route = 0, /* we do not accept RH0 by default. */ - .disable_ipv6 = 0, - .accept_dad = 1, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -218,8 +215,6 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { #endif .proxy_ndp = 0, .accept_source_route = 0, /* we do not accept RH0 by default. 
*/ - .disable_ipv6 = 0, - .accept_dad = 1, }; /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ @@ -383,9 +378,6 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) */ in6_dev_hold(ndev); - if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) - ndev->cnf.accept_dad = -1; - #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { printk(KERN_INFO @@ -586,13 +578,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, struct rt6_info *rt; int hash; int err = 0; - int addr_type = ipv6_addr_type(addr); - - if (addr_type == IPV6_ADDR_ANY || - addr_type & IPV6_ADDR_MULTICAST || - (!(idev->dev->flags & IFF_LOOPBACK) && - addr_type & IPV6_ADDR_LOOPBACK)) - return ERR_PTR(-EADDRNOTAVAIL); rcu_read_lock_bh(); if (idev->dead) { @@ -1427,20 +1412,6 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp) void addrconf_dad_failure(struct inet6_ifaddr *ifp) { - struct inet6_dev *idev = ifp->idev; - if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) { - struct in6_addr addr; - - addr.s6_addr32[0] = htonl(0xfe800000); - addr.s6_addr32[1] = 0; - - if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) && - ipv6_addr_equal(&ifp->addr, &addr)) { - /* DAD failed for link-local based on MAC address */ - idev->cnf.disable_ipv6 = 1; - } - } - if (net_ratelimit()) printk(KERN_INFO "%s: duplicate address detected!\n", ifp->idev->dev->name); addrconf_dad_stop(ifp); @@ -2773,7 +2744,6 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) spin_lock_bh(&ifp->lock); if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || - idev->cnf.accept_dad < 1 || !(ifp->flags&IFA_F_TENTATIVE) || ifp->flags & IFA_F_NODAD) { ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); @@ -2821,11 +2791,6 @@ static void addrconf_dad_timer(unsigned long data) read_unlock_bh(&idev->lock); goto out; } - if (idev->cnf.accept_dad > 1 && idev->cnf.disable_ipv6) { - read_unlock_bh(&idev->lock); - addrconf_dad_failure(ifp); - return; - } spin_lock_bh(&ifp->lock); if (ifp->probes == 0) { /* @@ -3685,8 +3650,6 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, #ifdef CONFIG_IPV6_MROUTE array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; #endif - array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; - array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; } static inline size_t inet6_if_nlmsg_size(void) @@ -4245,22 +4208,6 @@ static struct addrconf_sysctl_table .proc_handler = &proc_dointvec, }, #endif - { - .ctl_name = CTL_UNNUMBERED, - .procname = "disable_ipv6", - .data = &ipv6_devconf.disable_ipv6, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "accept_dad", - .data = &ipv6_devconf.accept_dad, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, { .ctl_name = 0, /* sentinel */ } diff --git a/trunk/net/ipv6/af_inet6.c b/trunk/net/ipv6/af_inet6.c index 3d828bc4b1cf..3ce8d2f318c6 100644 --- a/trunk/net/ipv6/af_inet6.c +++ b/trunk/net/ipv6/af_inet6.c @@ -59,7 +59,9 @@ #include #include +#ifdef CONFIG_IPV6_MROUTE #include +#endif MODULE_AUTHOR("Cast of dozens"); MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); @@ -950,9 +952,9 @@ static int __init inet6_init(void) err = icmpv6_init(); if (err) goto icmp_fail; - err = ip6_mr_init(); - if (err) - goto ipmr_fail; +#ifdef CONFIG_IPV6_MROUTE + ip6_mr_init(); +#endif err = ndisc_init(); if (err) goto ndisc_fail; @@ -1055,8 +1057,6 @@ static int __init inet6_init(void) 
igmp_fail: ndisc_cleanup(); ndisc_fail: - ip6_mr_cleanup(); -ipmr_fail: icmpv6_cleanup(); icmp_fail: unregister_pernet_subsys(&inet6_net_ops); @@ -1111,7 +1111,6 @@ static void __exit inet6_exit(void) ipv6_netfilter_fini(); igmp6_cleanup(); ndisc_cleanup(); - ip6_mr_cleanup(); icmpv6_cleanup(); rawv6_exit(); diff --git a/trunk/net/ipv6/icmp.c b/trunk/net/ipv6/icmp.c index abedf95fdf2d..399d41f65437 100644 --- a/trunk/net/ipv6/icmp.c +++ b/trunk/net/ipv6/icmp.c @@ -954,8 +954,7 @@ ctl_table ipv6_icmp_table_template[] = { .data = &init_net.ipv6.sysctl.icmpv6_time, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_ms_jiffies, - .strategy = &sysctl_ms_jiffies + .proc_handler = &proc_dointvec }, { .ctl_name = 0 }, }; diff --git a/trunk/net/ipv6/ip6_input.c b/trunk/net/ipv6/ip6_input.c index ea81c614dde2..34e5a96623ae 100644 --- a/trunk/net/ipv6/ip6_input.c +++ b/trunk/net/ipv6/ip6_input.c @@ -71,8 +71,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); - if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || - !idev || unlikely(idev->cnf.disable_ipv6)) { + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); rcu_read_unlock(); goto out; diff --git a/trunk/net/ipv6/ip6_output.c b/trunk/net/ipv6/ip6_output.c index 0981c1ef3057..fd7cd1bfe151 100644 --- a/trunk/net/ipv6/ip6_output.c +++ b/trunk/net/ipv6/ip6_output.c @@ -173,13 +173,6 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb) int ip6_output(struct sk_buff *skb) { - struct inet6_dev *idev = ip6_dst_idev(skb->dst); - if (unlikely(idev->cnf.disable_ipv6)) { - IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); - kfree_skb(skb); - return 0; - } - if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb->dst)) return ip6_fragment(skb, ip6_output2); @@ -505,8 +498,7 @@ int ip6_forward(struct sk_buff *skb) int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. 
*/ - if (addrtype == IPV6_ADDR_ANY || - addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) + if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, diff --git a/trunk/net/ipv6/ip6mr.c b/trunk/net/ipv6/ip6mr.c index cfac26d674ed..90e763073dc5 100644 --- a/trunk/net/ipv6/ip6mr.c +++ b/trunk/net/ipv6/ip6mr.c @@ -948,51 +948,23 @@ static struct notifier_block ip6_mr_notifier = { * Setup for IP multicast routing */ -int __init ip6_mr_init(void) +void __init ip6_mr_init(void) { - int err; - mrt_cachep = kmem_cache_create("ip6_mrt_cache", sizeof(struct mfc6_cache), 0, SLAB_HWCACHE_ALIGN, NULL); if (!mrt_cachep) - return -ENOMEM; + panic("cannot allocate ip6_mrt_cache"); setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); - err = register_netdevice_notifier(&ip6_mr_notifier); - if (err) - goto reg_notif_fail; -#ifdef CONFIG_PROC_FS - err = -ENOMEM; - if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops)) - goto proc_vif_fail; - if (!proc_net_fops_create(&init_net, "ip6_mr_cache", - 0, &ip6mr_mfc_fops)) - goto proc_cache_fail; -#endif - return 0; -reg_notif_fail: - kmem_cache_destroy(mrt_cachep); + register_netdevice_notifier(&ip6_mr_notifier); #ifdef CONFIG_PROC_FS -proc_vif_fail: - unregister_netdevice_notifier(&ip6_mr_notifier); -proc_cache_fail: - proc_net_remove(&init_net, "ip6_mr_vif"); + proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops); + proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops); #endif - return err; } -void ip6_mr_cleanup(void) -{ -#ifdef CONFIG_PROC_FS - proc_net_remove(&init_net, "ip6_mr_cache"); - proc_net_remove(&init_net, "ip6_mr_vif"); -#endif - unregister_netdevice_notifier(&ip6_mr_notifier); - del_timer(&ipmr_expire_timer); - kmem_cache_destroy(mrt_cachep); -} static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock) { diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index 5d6c166dfbb6..751e98f9b8b4 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -228,7 +228,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt) static inline int rt6_need_strict(struct in6_addr *daddr) { return (ipv6_addr_type(daddr) & - (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)); + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); } /* @@ -237,20 +237,15 @@ static inline int rt6_need_strict(struct in6_addr *daddr) static inline struct rt6_info *rt6_device_match(struct net *net, struct rt6_info *rt, - struct in6_addr *saddr, int oif, int flags) { struct rt6_info *local = NULL; struct rt6_info *sprt; - if (!oif && ipv6_addr_any(saddr)) - goto out; - - for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { - struct net_device *dev = sprt->rt6i_dev; - - if (oif) { + if (oif) { + for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { + struct net_device *dev = sprt->rt6i_dev; if (dev->ifindex == oif) return sprt; if (dev->flags & IFF_LOOPBACK) { @@ -264,21 +259,14 @@ static inline struct rt6_info *rt6_device_match(struct net *net, } local = sprt; } - } else { - if (ipv6_chk_addr(net, saddr, dev, - flags & RT6_LOOKUP_F_IFACE)) - return sprt; } - } - if (oif) { if (local) return local; if (flags & RT6_LOOKUP_F_IFACE) return net->ipv6.ip6_null_entry; } -out: return rt; } @@ -551,7 +539,7 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); restart: rt = fn->leaf; - rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags); + 
rt = rt6_device_match(net, rt, fl->oif, flags); BACKTRACK(net, &fl->fl6_src); out: dst_use(&rt->u.dst, jiffies); diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c index d1477b350f76..f91e1df0d25e 100644 --- a/trunk/net/ipv6/udp.c +++ b/trunk/net/ipv6/udp.c @@ -166,8 +166,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, goto out_free; if (!peeked) - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_INDATAGRAMS, is_udplite); + UDP6_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_timestamp(msg, sk, skb); @@ -214,7 +213,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, csum_copy_err: lock_sock(sk); if (!skb_kill_datagram(sk, skb, flags)) - UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); release_sock(sk); if (flags & MSG_DONTWAIT) @@ -299,8 +298,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) { - UDP6_INC_STATS_BH(sock_net(sk), - UDP_MIB_RCVBUFERRORS, is_udplite); + UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite); atomic_inc(&sk->sk_drops); } goto drop; @@ -308,7 +306,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) return 0; drop: - UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); return -1; } @@ -440,7 +438,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], struct net_device *dev = skb->dev; struct in6_addr *saddr, *daddr; u32 ulen = 0; - struct net *net = dev_net(skb->dev); + struct net *net; if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto short_packet; @@ -475,6 +473,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], if (udp6_csum_init(skb, uh, proto)) goto discard; + net = dev_net(skb->dev); /* * Multicast receive code */ @@ -497,8 +496,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], if (udp_lib_checksum_complete(skb)) goto discard; - UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, - proto == IPPROTO_UDPLITE); + UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); @@ -523,7 +521,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], ulen, skb->len); discard: - UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); + UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } @@ -593,8 +591,7 @@ static int udp_v6_push_pending_frames(struct sock *sk) up->len = 0; up->pending = 0; if (!err) - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_OUTDATAGRAMS, is_udplite); + UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } @@ -876,8 +873,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, * seems like overkill. 
*/ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_SNDBUFERRORS, is_udplite); + UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); } return err; diff --git a/trunk/net/rxrpc/ar-input.c b/trunk/net/rxrpc/ar-input.c index f98c8027e5c1..f8a699e92962 100644 --- a/trunk/net/rxrpc/ar-input.c +++ b/trunk/net/rxrpc/ar-input.c @@ -21,7 +21,6 @@ #include #include #include -#include #include "ar-internal.h" unsigned long rxrpc_ack_timeout = 1; @@ -709,12 +708,12 @@ void rxrpc_data_ready(struct sock *sk, int count) if (skb_checksum_complete(skb)) { rxrpc_free_skb(skb); rxrpc_put_local(local); - UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0); + UDP_INC_STATS_BH(UDP_MIB_INERRORS, 0); _leave(" [CSUM failed]"); return; } - UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0); + UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 0); /* the socket buffer we have is owned by UDP, with UDP's data all over * it, but we really want our own */ diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index df5572c39f0c..43460a1cb6d0 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -4223,8 +4223,6 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len, if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) return -EFAULT; - printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_NUM_OLD " - "socket option deprecated\n"); /* For UDP-style sockets, id specifies the association to query. */ asoc = sctp_id2assoc(sk, id); if (!asoc) @@ -4264,9 +4262,6 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len, if (getaddrs.addr_num <= 0) return -EINVAL; - printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_OLD " - "socket option deprecated\n"); - /* For UDP-style sockets, id specifies the association to query. */ asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) @@ -4360,9 +4355,6 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) return -EFAULT; - printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_NUM_OLD " - "socket option deprecated\n"); - /* * For UDP-style sockets, id specifies the association to query. * If the id field is set to the value '0' then the locally bound @@ -4523,10 +4515,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, if (getaddrs.addr_num <= 0 || getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr))) return -EINVAL; - - printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_OLD " - "socket option deprecated\n"); - /* * For UDP-style sockets, id specifies the association to query. * If the id field is set to the value '0' then the locally bound