From c06ca9ebd56da57af3c6308d157edfe9b1603559 Mon Sep 17 00:00:00 2001
From: Michael Buesch
Date: Fri, 6 Nov 2009 18:32:44 +0100
Subject: [PATCH] --- yaml ---

r: 171635
b: refs/heads/master
c: f54a52021d7ad039c16fe5a1e094d8f0394d90ec
h: refs/heads/master
i:
  171633: 8684f8870fe43d62573ba1b63aa30b0442ab44d2
  171631: b2960084bb5375750936f12e9c8eb42a7e503499
v: v3
---
 [refs] | 2 +-
 .../powerpc/dts-bindings/fsl/mpc5200.txt | 10 -
 trunk/MAINTAINERS | 7 +-
 trunk/arch/alpha/include/asm/unistd.h | 3 +-
 trunk/drivers/atm/solos-pci.c | 29 +-
 trunk/drivers/isdn/hardware/mISDN/hfcmulti.c | 2 +-
 trunk/drivers/isdn/i4l/isdn_ppp.c | 352 +++++----
 trunk/drivers/misc/iwmc3200top/main.c | 30 +-
 trunk/drivers/net/Kconfig | 2 +-
 trunk/drivers/net/arm/ks8695net.c | 35 +-
 trunk/drivers/net/arm/w90p910_ether.c | 4 +-
 trunk/drivers/net/atl1c/atl1c_main.c | 2 +-
 trunk/drivers/net/bnx2x.h | 21 +-
 trunk/drivers/net/bnx2x_main.c | 370 +++++----
 trunk/drivers/net/bonding/bond_3ad.c | 85 ++-
 trunk/drivers/net/bonding/bond_main.c | 2 +-
 trunk/drivers/net/can/Kconfig | 62 +-
 trunk/drivers/net/can/Makefile | 1 -
 trunk/drivers/net/can/dev.c | 6 -
 trunk/drivers/net/can/mcp251x.c | 18 +-
 trunk/drivers/net/can/mscan/Kconfig | 23 -
 trunk/drivers/net/can/mscan/Makefile | 5 -
 trunk/drivers/net/can/mscan/mpc5xxx_can.c | 259 -------
 trunk/drivers/net/can/mscan/mscan.c | 668 -----------------
 trunk/drivers/net/can/mscan/mscan.h | 296 --------
 trunk/drivers/net/can/sja1000/Kconfig | 47 --
 trunk/drivers/net/can/sja1000/sja1000.c | 2 +-
 trunk/drivers/net/can/usb/Kconfig | 10 -
 trunk/drivers/net/can/usb/Makefile | 2 -
 trunk/drivers/net/davinci_emac.c | 3 +
 trunk/drivers/net/dm9000.c | 143 +---
 trunk/drivers/net/dm9000.h | 7 -
 trunk/drivers/net/ethoc.c | 2 +-
 trunk/drivers/net/forcedeth.c | 5 +-
 trunk/drivers/net/gianfar.c | 43 +-
 trunk/drivers/net/gianfar_sysfs.c | 2 +-
 trunk/drivers/net/hamradio/mkiss.c | 2 +-
 trunk/drivers/net/igb/igb.h | 13 +-
 trunk/drivers/net/igb/igb_ethtool.c | 181 +++--
 trunk/drivers/net/igb/igb_main.c | 213 +++---
 trunk/drivers/net/ipg.c | 2 +-
 trunk/drivers/net/irda/irda-usb.c | 2 +-
 trunk/drivers/net/ixgbe/ixgbe_main.c | 8 +-
 trunk/drivers/net/macvlan.c | 78 +-
 trunk/drivers/net/niu.c | 2 +-
 trunk/drivers/net/pcmcia/fmvj18x_cs.c | 2 +-
 trunk/drivers/net/pcmcia/nmclan_cs.c | 2 +-
 trunk/drivers/net/ppp_async.c | 2 +-
 trunk/drivers/net/ppp_generic.c | 13 +-
 trunk/drivers/net/pppoe.c | 2 +-
 trunk/drivers/net/pppol2tp.c | 4 +-
 trunk/drivers/net/qlge/qlge.h | 2 +-
 trunk/drivers/net/qlge/qlge_main.c | 26 +-
 trunk/drivers/net/r6040.c | 4 +-
 trunk/drivers/net/r8169.c | 4 +-
 trunk/drivers/net/s2io.c | 1 -
 trunk/drivers/net/smsc911x.c | 2 +-
 trunk/drivers/net/smsc9420.c | 2 +-
 trunk/drivers/net/tg3.c | 706 ++++++------------
 trunk/drivers/net/tg3.h | 34 +-
 trunk/drivers/net/tokenring/3c359.c | 3 +-
 trunk/drivers/net/tokenring/olympic.c | 4 +-
 trunk/drivers/net/typhoon.c | 2 +-
 trunk/drivers/net/via-rhine.c | 2 +-
 trunk/drivers/net/via-velocity.c | 2 +-
 trunk/drivers/net/vmxnet3/vmxnet3_defs.h | 246 ++----
 trunk/drivers/net/vmxnet3/vmxnet3_drv.c | 359 +++------
 trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c | 10 +-
 trunk/drivers/net/vmxnet3/vmxnet3_int.h | 12 +-
 trunk/drivers/net/wan/dscc4.c | 2 +-
 trunk/drivers/net/wireless/adm8211.c | 2 +-
 trunk/drivers/net/wireless/b43/dma.c | 48 +-
 trunk/drivers/net/wireless/b43/xmit.h | 19 +
 trunk/drivers/net/wireless/ipw2x00/ipw2100.c | 6 +-
 trunk/drivers/net/wireless/ipw2x00/ipw2200.c | 141 +++-
 trunk/drivers/net/wireless/ipw2x00/libipw.h | 8 +-
 .../net/wireless/ipw2x00/libipw_module.c | 42 +-
 trunk/drivers/net/wireless/iwlwifi/iwl-1000.c | 1 -
 .../net/wireless/iwlwifi/iwl-3945-rs.c | 2 +-
 trunk/drivers/net/wireless/p54/p54pci.c | 2 +-
 trunk/drivers/net/wireless/p54/p54usb.c | 10 +-
 .../net/wireless/rtl818x/rtl8180_dev.c | 2 +-
 trunk/drivers/s390/net/Makefile | 6 +-
 trunk/drivers/s390/net/claw.c | 82 +-
 trunk/drivers/s390/net/claw.h | 12 -
 trunk/drivers/s390/net/ctcm_fsms.c | 1 +
 trunk/drivers/s390/net/ctcm_fsms.h | 1 +
 trunk/drivers/s390/net/ctcm_main.c | 109 +--
 trunk/drivers/s390/net/ctcm_main.h | 20 +-
 trunk/drivers/s390/net/ctcm_mpc.c | 1 +
 trunk/drivers/s390/net/ctcm_sysfs.c | 11 +-
 trunk/drivers/s390/net/cu3088.c | 148 ++++
 trunk/drivers/s390/net/cu3088.h | 41 +
 trunk/drivers/s390/net/fsm.c | 1 -
 trunk/drivers/s390/net/fsm.h | 2 -
 trunk/drivers/s390/net/lcs.c | 101 +--
 trunk/drivers/s390/net/lcs.h | 18 -
 trunk/drivers/s390/net/netiucv.c | 4 +-
 trunk/drivers/s390/net/qeth_core.h | 6 +-
 trunk/drivers/s390/net/qeth_core_main.c | 214 ++----
 trunk/drivers/s390/net/qeth_core_mpc.h | 45 +-
 trunk/drivers/s390/net/qeth_core_sys.c | 83 +-
 trunk/drivers/s390/net/qeth_l2_main.c | 29 +-
 trunk/drivers/s390/net/qeth_l3.h | 2 -
 trunk/drivers/s390/net/qeth_l3_main.c | 142 ++--
 trunk/drivers/s390/net/qeth_l3_sys.c | 67 +-
 trunk/include/linux/if_ether.h | 4 +
 trunk/include/linux/isdn_ppp.h | 2 +-
 trunk/include/linux/netdevice.h | 77 +-
 trunk/include/linux/notifier.h | 1 -
 trunk/include/linux/tcp.h | 6 -
 trunk/include/net/inet_hashtables.h | 4 +-
 trunk/include/net/inetpeer.h | 16 +-
 trunk/include/net/phonet/pn_dev.h | 2 +-
 trunk/include/net/sctp/structs.h | 2 +-
 trunk/include/net/tcp.h | 3 +
 trunk/kernel/time/clocksource.c | 6 +-
 trunk/kernel/time/timecompare.c | 6 +-
 trunk/net/8021q/vlan.c | 2 +-
 trunk/net/8021q/vlan.h | 17 -
 trunk/net/8021q/vlan_core.c | 16 +-
 trunk/net/8021q/vlan_dev.c | 51 +-
 trunk/net/atm/ioctl.c | 177 +---
 trunk/net/bluetooth/hci_conn.c | 1 -
 trunk/net/bluetooth/l2cap.c | 13 +-
 trunk/net/core/dev.c | 193 ++---
 trunk/net/core/link_watch.c | 94 +--
 trunk/net/core/skbuff.c | 3 -
 trunk/net/decnet/dn_dev.c | 23 +-
 trunk/net/ethernet/eth.c | 7 +
 trunk/net/ieee802154/wpan-class.c | 2 +-
 trunk/net/ipv4/devinet.c | 61 +-
 trunk/net/ipv4/fib_frontend.c | 11 +-
 trunk/net/ipv4/igmp.c | 27 +-
 trunk/net/ipv4/inetpeer.c | 5 +-
 trunk/net/ipv4/ip_gre.c | 2 +-
 trunk/net/ipv4/ipip.c | 2 +-
 trunk/net/ipv4/ipmr.c | 4 +-
 trunk/net/ipv4/route.c | 2 +-
 trunk/net/ipv4/tcp.c | 19 +-
 trunk/net/ipv4/tcp_input.c | 4 +-
 trunk/net/ipv4/tcp_ipv4.c | 22 +-
 trunk/net/ipv4/tcp_minisocks.c | 2 +-
 trunk/net/ipv6/addrconf.c | 173 ++---
 trunk/net/ipv6/anycast.c | 29 +-
 trunk/net/ipv6/ip6_tunnel.c | 2 +-
 trunk/net/ipv6/mcast.c | 51 +-
 trunk/net/ipv6/sit.c | 2 +-
 trunk/net/ipv6/tcp_ipv6.c | 2 +-
 trunk/net/iucv/iucv.c | 16 +-
 trunk/net/key/af_key.c | 2 +-
 trunk/net/netfilter/nf_conntrack_proto_dccp.c | 2 +-
 trunk/net/netfilter/nf_conntrack_proto_gre.c | 2 +-
 trunk/net/netlink/af_netlink.c | 2 +-
 trunk/net/phonet/af_phonet.c | 22 +-
 trunk/net/phonet/pn_dev.c | 124 ++-
 trunk/net/phonet/pn_netlink.c | 6 +-
 trunk/net/sched/act_mirred.c | 105 ++-
 trunk/net/sched/sch_generic.c | 18 +-
 trunk/net/sctp/associola.c | 4 +-
 trunk/net/sctp/sm_statefuns.c | 15 +-
 trunk/net/sctp/socket.c | 40 +-
 trunk/net/sctp/transport.c | 3 +-
 trunk/net/socket.c | 335 +++++++--
 164 files changed, 2723 insertions(+), 5059 deletions(-)
 delete mode 100644 trunk/drivers/net/can/mscan/Kconfig
 delete mode 100644 trunk/drivers/net/can/mscan/Makefile
 delete mode 100644 trunk/drivers/net/can/mscan/mpc5xxx_can.c
 delete mode 100644
trunk/drivers/net/can/mscan/mscan.c delete mode 100644 trunk/drivers/net/can/mscan/mscan.h delete mode 100644 trunk/drivers/net/can/sja1000/Kconfig delete mode 100644 trunk/drivers/net/can/usb/Kconfig create mode 100644 trunk/drivers/s390/net/cu3088.c create mode 100644 trunk/drivers/s390/net/cu3088.h diff --git a/[refs] b/[refs] index 2fe444859921..a2fb20c85c04 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: ea31ba359c55e0734ff895692185d4c50cf0c537 +refs/heads/master: f54a52021d7ad039c16fe5a1e094d8f0394d90ec diff --git a/trunk/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt index cabc780f7258..8447fd7090d0 100644 --- a/trunk/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt +++ b/trunk/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt @@ -178,13 +178,3 @@ External interrupts: external irq3: interrupts = <1 3 n>; 'n' is sense (0: level high, 1: edge rising, 2: edge falling 3: level low) -fsl,mpc5200-mscan nodes ------------------------ -In addition to the required compatible-, reg- and interrupt-properites, you can -also specify which clock source shall be used for the controller: - -- fsl,mscan-clock-source- a string describing the clock source. Valid values - are: "ip" for ip bus clock - "ref" for reference clock (XTAL) - "ref" is default in case this property is not - present. diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index b2e3f3507ca3..7f2f29cf75ff 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3704,9 +3704,9 @@ F: include/linux/if_* F: include/linux/*device.h NETXEN (1/10) GbE SUPPORT -M: Amit Kumar Salecha +M: Dhananjay Phadke L: netdev@vger.kernel.org -W: http://www.qlogic.com +W: http://www.netxen.com S: Supported F: drivers/net/netxen/ @@ -4304,7 +4304,6 @@ F: drivers/video/aty/aty128fb.c RALINK RT2X00 WIRELESS LAN DRIVER P: rt2x00 project M: Ivo van Doorn -M: Gertjan van Wingerde L: linux-wireless@vger.kernel.org L: users@rt2x00.serialmonkey.com (moderated for non-subscribers) W: http://rt2x00.serialmonkey.com/ @@ -4392,7 +4391,7 @@ RFKILL M: Johannes Berg L: linux-wireless@vger.kernel.org S: Maintained -F: Documentation/rfkill.txt +F Documentation/rfkill.txt F: net/rfkill/ RISCOM8 DRIVER diff --git a/trunk/arch/alpha/include/asm/unistd.h b/trunk/arch/alpha/include/asm/unistd.h index 7f23665122df..5b5c17485942 100644 --- a/trunk/arch/alpha/include/asm/unistd.h +++ b/trunk/arch/alpha/include/asm/unistd.h @@ -433,11 +433,10 @@ #define __NR_signalfd 476 #define __NR_timerfd 477 #define __NR_eventfd 478 -#define __NR_recvmmsg 479 #ifdef __KERNEL__ -#define NR_SYSCALLS 480 +#define NR_SYSCALLS 479 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/drivers/atm/solos-pci.c b/trunk/drivers/atm/solos-pci.c index 51eed679a059..d7ad19d2603a 100644 --- a/trunk/drivers/atm/solos-pci.c +++ b/trunk/drivers/atm/solos-pci.c @@ -531,37 +531,34 @@ static int flash_upgrade(struct solos_card *card, int chip) int numblocks = 0; int offset; - switch (chip) { - case 0: + if (chip == 0) { fw_name = "solos-FPGA.bin"; blocksize = FPGA_BLOCK; - break; - case 1: + } + + if (chip == 1) { fw_name = "solos-Firmware.bin"; blocksize = SOLOS_BLOCK; - break; - case 2: + } + + if (chip == 2){ if (card->fpga_version > LEGACY_BUFFERS){ fw_name = "solos-db-FPGA.bin"; blocksize = FPGA_BLOCK; } else { - dev_info(&card->dev->dev, "FPGA version doesn't support" - " daughter board upgrades\n"); + dev_info(&card->dev->dev, "FPGA version doesn't support daughter board 
upgrades\n"); return -EPERM; } - break; - case 3: + } + + if (chip == 3){ if (card->fpga_version > LEGACY_BUFFERS){ fw_name = "solos-Firmware.bin"; blocksize = SOLOS_BLOCK; } else { - dev_info(&card->dev->dev, "FPGA version doesn't support" - " daughter board upgrades\n"); - return -EPERM; + dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n"); + return -EPERM; } - break; - default: - return -ENODEV; } if (request_firmware(&fw, fw_name, &card->dev->dev)) diff --git a/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c b/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c index a6624ad252c5..faed794cf75a 100644 --- a/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -5481,7 +5481,7 @@ HFCmulti_init(void) if (err) { printk(KERN_ERR "error registering embedded driver: " "%x\n", err); - return err; + return -err; } HFC_cnt++; printk(KERN_INFO "%d devices registered\n", HFC_cnt); diff --git a/trunk/drivers/isdn/i4l/isdn_ppp.c b/trunk/drivers/isdn/i4l/isdn_ppp.c index 642d5aaf53ce..2d14b64202a3 100644 --- a/trunk/drivers/isdn/i4l/isdn_ppp.c +++ b/trunk/drivers/isdn/i4l/isdn_ppp.c @@ -1535,8 +1535,10 @@ static int isdn_ppp_mp_bundle_array_init(void) int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle); if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL ) return -ENOMEM; - for( i = 0; i < ISDN_MAX_CHANNELS; i++ ) + for (i = 0; i < ISDN_MAX_CHANNELS; i++) { spin_lock_init(&isdn_ppp_bundle_arr[i].lock); + skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags); + } return 0; } @@ -1569,7 +1571,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to ) if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL) return -ENOMEM; lp->next = lp->last = lp; /* nobody else in a queue */ - lp->netdev->pb->frags = NULL; + skb_queue_head_init(&lp->netdev->pb->frags); lp->netdev->pb->frames = 0; lp->netdev->pb->seq = UINT_MAX; } @@ -1581,28 +1583,29 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to ) static u32 isdn_ppp_mp_get_seq( int short_seq, struct sk_buff * skb, u32 last_seq ); -static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, - struct sk_buff * from, struct sk_buff * to ); -static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, - struct sk_buff * from, struct sk_buff * to ); -static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb ); +static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, + struct sk_buff *to); +static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, + struct sk_buff *from, struct sk_buff *to, + u32 lastseq); +static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb); static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb ); static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, - struct sk_buff *skb) + struct sk_buff *skb) { - struct ippp_struct *is; - isdn_net_local * lpq; - ippp_bundle * mp; - isdn_mppp_stats * stats; - struct sk_buff * newfrag, * frag, * start, *nextf; + struct sk_buff *newfrag, *frag, *start, *nextf; u32 newseq, minseq, thisseq; + isdn_mppp_stats *stats; + struct ippp_struct *is; unsigned long flags; + isdn_net_local *lpq; + ippp_bundle *mp; int slot; spin_lock_irqsave(&net_dev->pb->lock, flags); - mp = net_dev->pb; - stats = &mp->stats; + mp = net_dev->pb; + stats = &mp->stats; slot = lp->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d)\n", @@ -1613,20 +1616,19 @@ static 
void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, return; } is = ippp_table[slot]; - if( ++mp->frames > stats->max_queue_len ) + if (++mp->frames > stats->max_queue_len) stats->max_queue_len = mp->frames; - + if (is->debug & 0x8) isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb); - newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, - skb, is->last_link_seqno); - + newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, + skb, is->last_link_seqno); /* if this packet seq # is less than last already processed one, * toss it right away, but check for sequence start case first */ - if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) { + if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) { mp->seq = newseq; /* the first packet: required for * rfc1990 non-compliant clients -- * prevents constant packet toss */ @@ -1636,7 +1638,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, spin_unlock_irqrestore(&mp->lock, flags); return; } - + /* find the minimum received sequence number over all links */ is->last_link_seqno = minseq = newseq; for (lpq = net_dev->queue;;) { @@ -1657,22 +1659,31 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * packets */ newfrag = skb; - /* if this new fragment is before the first one, then enqueue it now. */ - if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) { - newfrag->next = frag; - mp->frags = frag = newfrag; - newfrag = NULL; - } + /* Insert new fragment into the proper sequence slot. */ + skb_queue_walk(&mp->frags, frag) { + if (MP_SEQ(frag) == newseq) { + isdn_ppp_mp_free_skb(mp, newfrag); + newfrag = NULL; + break; + } + if (MP_LT(newseq, MP_SEQ(frag))) { + __skb_queue_before(&mp->frags, frag, newfrag); + newfrag = NULL; + break; + } + } + if (newfrag) + __skb_queue_tail(&mp->frags, newfrag); - start = MP_FLAGS(frag) & MP_BEGIN_FRAG && - MP_SEQ(frag) == mp->seq ? frag : NULL; + frag = skb_peek(&mp->frags); + start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) && + (MP_SEQ(frag) == mp->seq)) ? frag : NULL; + if (!start) + goto check_overflow; - /* - * main fragment traversing loop + /* main fragment traversing loop * * try to accomplish several tasks: - * - insert new fragment into the proper sequence slot (once that's done - * newfrag will be set to NULL) * - reassemble any complete fragment sequence (non-null 'start' * indicates there is a continguous sequence present) * - discard any incomplete sequences that are below minseq -- due @@ -1681,71 +1692,46 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * come to complete such sequence and it should be discarded * * loop completes when we accomplished the following tasks: - * - new fragment is inserted in the proper sequence ('newfrag' is - * set to NULL) * - we hit a gap in the sequence, so no reassembly/processing is * possible ('start' would be set to NULL) * * algorithm for this code is derived from code in the book * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) */ - while (start != NULL || newfrag != NULL) { - - thisseq = MP_SEQ(frag); - nextf = frag->next; - - /* drop any duplicate fragments */ - if (newfrag != NULL && thisseq == newseq) { - isdn_ppp_mp_free_skb(mp, newfrag); - newfrag = NULL; - } - - /* insert new fragment before next element if possible. 
*/ - if (newfrag != NULL && (nextf == NULL || - MP_LT(newseq, MP_SEQ(nextf)))) { - newfrag->next = nextf; - frag->next = nextf = newfrag; - newfrag = NULL; - } - - if (start != NULL) { - /* check for misplaced start */ - if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { - printk(KERN_WARNING"isdn_mppp(seq %d): new " - "BEGIN flag with no prior END", thisseq); - stats->seqerrs++; - stats->frame_drops++; - start = isdn_ppp_mp_discard(mp, start,frag); - nextf = frag->next; - } - } else if (MP_LE(thisseq, minseq)) { - if (MP_FLAGS(frag) & MP_BEGIN_FRAG) + skb_queue_walk_safe(&mp->frags, frag, nextf) { + thisseq = MP_SEQ(frag); + + /* check for misplaced start */ + if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { + printk(KERN_WARNING"isdn_mppp(seq %d): new " + "BEGIN flag with no prior END", thisseq); + stats->seqerrs++; + stats->frame_drops++; + isdn_ppp_mp_discard(mp, start, frag); + start = frag; + } else if (MP_LE(thisseq, minseq)) { + if (MP_FLAGS(frag) & MP_BEGIN_FRAG) start = frag; - else { + else { if (MP_FLAGS(frag) & MP_END_FRAG) - stats->frame_drops++; - if( mp->frags == frag ) - mp->frags = nextf; + stats->frame_drops++; + __skb_unlink(skb, &mp->frags); isdn_ppp_mp_free_skb(mp, frag); - frag = nextf; continue; - } + } } - - /* if start is non-null and we have end fragment, then - * we have full reassembly sequence -- reassemble - * and process packet now + + /* if we have end fragment, then we have full reassembly + * sequence -- reassemble and process packet now */ - if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) { - minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; - /* Reassemble the packet then dispatch it */ - isdn_ppp_mp_reassembly(net_dev, lp, start, nextf); - - start = NULL; - frag = NULL; + if (MP_FLAGS(frag) & MP_END_FRAG) { + minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; + /* Reassemble the packet then dispatch it */ + isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq); - mp->frags = nextf; - } + start = NULL; + frag = NULL; + } /* check if need to update start pointer: if we just * reassembled the packet and sequence is contiguous @@ -1756,26 +1742,25 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * below low watermark and set start to the next frag or * clear start ptr. */ - if (nextf != NULL && + if (nextf != (struct sk_buff *)&mp->frags && ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { - /* if we just reassembled and the next one is here, - * then start another reassembly. */ - - if (frag == NULL) { + /* if we just reassembled and the next one is here, + * then start another reassembly. 
+ */ + if (frag == NULL) { if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) - start = nextf; - else - { - printk(KERN_WARNING"isdn_mppp(seq %d):" - " END flag with no following " - "BEGIN", thisseq); + start = nextf; + else { + printk(KERN_WARNING"isdn_mppp(seq %d):" + " END flag with no following " + "BEGIN", thisseq); stats->seqerrs++; } } - - } else { - if ( nextf != NULL && frag != NULL && - MP_LT(thisseq, minseq)) { + } else { + if (nextf != (struct sk_buff *)&mp->frags && + frag != NULL && + MP_LT(thisseq, minseq)) { /* we've got a break in the sequence * and we not at the end yet * and we did not just reassembled @@ -1784,41 +1769,39 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * discard all the frames below low watermark * and start over */ stats->frame_drops++; - mp->frags = isdn_ppp_mp_discard(mp,start,nextf); + isdn_ppp_mp_discard(mp, start, nextf); } /* break in the sequence, no reassembly */ - start = NULL; - } - - frag = nextf; - } /* while -- main loop */ - - if (mp->frags == NULL) - mp->frags = frag; - + start = NULL; + } + if (!start) + break; + } + +check_overflow: /* rather straighforward way to deal with (not very) possible - * queue overflow */ + * queue overflow + */ if (mp->frames > MP_MAX_QUEUE_LEN) { stats->overflows++; - while (mp->frames > MP_MAX_QUEUE_LEN) { - frag = mp->frags->next; - isdn_ppp_mp_free_skb(mp, mp->frags); - mp->frags = frag; + skb_queue_walk_safe(&mp->frags, frag, nextf) { + if (mp->frames <= MP_MAX_QUEUE_LEN) + break; + __skb_unlink(frag, &mp->frags); + isdn_ppp_mp_free_skb(mp, frag); } } spin_unlock_irqrestore(&mp->lock, flags); } -static void isdn_ppp_mp_cleanup( isdn_net_local * lp ) +static void isdn_ppp_mp_cleanup(isdn_net_local *lp) { - struct sk_buff * frag = lp->netdev->pb->frags; - struct sk_buff * nextfrag; - while( frag ) { - nextfrag = frag->next; - isdn_ppp_mp_free_skb(lp->netdev->pb, frag); - frag = nextfrag; - } - lp->netdev->pb->frags = NULL; + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) { + __skb_unlink(skb, &lp->netdev->pb->frags); + isdn_ppp_mp_free_skb(lp->netdev->pb, skb); + } } static u32 isdn_ppp_mp_get_seq( int short_seq, @@ -1855,72 +1838,115 @@ static u32 isdn_ppp_mp_get_seq( int short_seq, return seq; } -struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, - struct sk_buff * from, struct sk_buff * to ) +static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, + struct sk_buff *to) { - if( from ) - while (from != to) { - struct sk_buff * next = from->next; - isdn_ppp_mp_free_skb(mp, from); - from = next; + if (from) { + struct sk_buff *skb, *tmp; + int freeing = 0; + + skb_queue_walk_safe(&mp->frags, skb, tmp) { + if (skb == to) + break; + if (skb == from) + freeing = 1; + if (!freeing) + continue; + __skb_unlink(skb, &mp->frags); + isdn_ppp_mp_free_skb(mp, skb); } - return from; + } } -void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, - struct sk_buff * from, struct sk_buff * to ) +static unsigned int calc_tot_len(struct sk_buff_head *queue, + struct sk_buff *from, struct sk_buff *to) { - ippp_bundle * mp = net_dev->pb; - int proto; - struct sk_buff * skb; + unsigned int tot_len = 0; + struct sk_buff *skb; + int found_start = 0; + + skb_queue_walk(queue, skb) { + if (skb == from) + found_start = 1; + if (!found_start) + continue; + tot_len += skb->len - MP_HEADER_LEN; + if (skb == to) + break; + } + return tot_len; +} + +/* Reassemble packet using fragments in the reassembly queue from + * 'from' until 'to', 
inclusive. + */ +static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, + struct sk_buff *from, struct sk_buff *to, + u32 lastseq) +{ + ippp_bundle *mp = net_dev->pb; unsigned int tot_len; + struct sk_buff *skb; + int proto; if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return; } - if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) { - if( ippp_table[lp->ppp_slot]->debug & 0x40 ) + + tot_len = calc_tot_len(&mp->frags, from, to); + + if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) { + if (ippp_table[lp->ppp_slot]->debug & 0x40) printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, " - "len %d\n", MP_SEQ(from), from->len ); + "len %d\n", MP_SEQ(from), from->len); skb = from; skb_pull(skb, MP_HEADER_LEN); + __skb_unlink(skb, &mp->frags); mp->frames--; } else { - struct sk_buff * frag; - int n; + struct sk_buff *walk, *tmp; + int found_start = 0; - for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++) - tot_len += frag->len - MP_HEADER_LEN; - - if( ippp_table[lp->ppp_slot]->debug & 0x40 ) + if (ippp_table[lp->ppp_slot]->debug & 0x40) printk(KERN_DEBUG"isdn_mppp: reassembling frames %d " - "to %d, len %d\n", MP_SEQ(from), - (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len ); - if( (skb = dev_alloc_skb(tot_len)) == NULL ) { + "to %d, len %d\n", MP_SEQ(from), lastseq, + tot_len); + + skb = dev_alloc_skb(tot_len); + if (!skb) printk(KERN_ERR "isdn_mppp: cannot allocate sk buff " - "of size %d\n", tot_len); - isdn_ppp_mp_discard(mp, from, to); - return; - } + "of size %d\n", tot_len); + + found_start = 0; + skb_queue_walk_safe(&mp->frags, walk, tmp) { + if (walk == from) + found_start = 1; + if (!found_start) + continue; - while( from != to ) { - unsigned int len = from->len - MP_HEADER_LEN; + if (skb) { + unsigned int len = walk->len - MP_HEADER_LEN; + skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN, + skb_put(skb, len), + len); + } + __skb_unlink(walk, &mp->frags); + isdn_ppp_mp_free_skb(mp, walk); - skb_copy_from_linear_data_offset(from, MP_HEADER_LEN, - skb_put(skb,len), - len); - frag = from->next; - isdn_ppp_mp_free_skb(mp, from); - from = frag; + if (walk == to) + break; } } + if (!skb) + return; + proto = isdn_ppp_strip_proto(skb); isdn_ppp_push_higher(net_dev, lp, skb, proto); } -static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb) +static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb) { dev_kfree_skb(skb); mp->frames--; diff --git a/trunk/drivers/misc/iwmc3200top/main.c b/trunk/drivers/misc/iwmc3200top/main.c index 02b3dadc8abd..6e4e49113ab4 100644 --- a/trunk/drivers/misc/iwmc3200top/main.c +++ b/trunk/drivers/misc/iwmc3200top/main.c @@ -41,13 +41,36 @@ #define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver" #define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation." 
-#define DRIVER_VERSION "0.1.62" +#define IWMCT_VERSION "0.1.62" + +#ifdef REPOSITORY_LABEL +#define RL REPOSITORY_LABEL +#else +#define RL local +#endif + +#ifdef CONFIG_IWMC3200TOP_DEBUG +#define VD "-d" +#else +#define VD +#endif + +#define DRIVER_VERSION IWMCT_VERSION "-" __stringify(RL) VD MODULE_DESCRIPTION(DRIVER_DESCRIPTION); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_COPYRIGHT); + +/* FIXME: These can be found in sdio_ids.h in newer kernels */ +#ifndef SDIO_INTEL_VENDOR_ID +#define SDIO_INTEL_VENDOR_ID 0x0089 +#endif +#ifndef SDIO_DEVICE_ID_INTEL_IWMC3200TOP +#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 +#endif + /* * This workers main task is to wait for OP_OPR_ALIVE * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed. @@ -639,9 +662,8 @@ static void iwmct_remove(struct sdio_func *func) static const struct sdio_device_id iwmct_ids[] = { - /* Intel Wireless MultiCom 3200 Top Driver */ - { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)}, - { }, /* Terminating entry */ + { SDIO_DEVICE(SDIO_INTEL_VENDOR_ID, SDIO_DEVICE_ID_INTEL_IWMC3200TOP)}, + { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(sdio, iwmct_ids); diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig index 6399abbdad6b..e012c2e0825a 100644 --- a/trunk/drivers/net/Kconfig +++ b/trunk/drivers/net/Kconfig @@ -3235,7 +3235,7 @@ config VIRTIO_NET config VMXNET3 tristate "VMware VMXNET3 ethernet driver" - depends on PCI && INET + depends on PCI && X86 && INET help This driver supports VMware's vmxnet3 virtual ethernet NIC. To compile this driver as a module, choose M here: the diff --git a/trunk/drivers/net/arm/ks8695net.c b/trunk/drivers/net/arm/ks8695net.c index be256b34cea8..0073d198715b 100644 --- a/trunk/drivers/net/arm/ks8695net.c +++ b/trunk/drivers/net/arm/ks8695net.c @@ -433,16 +433,24 @@ ks8695_rx_irq(int irq, void *dev_id) { struct net_device *ndev = (struct net_device *)dev_id; struct ks8695_priv *ksp = netdev_priv(ndev); + unsigned long status; + + unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); spin_lock(&ksp->rx_lock); - if (napi_schedule_prep(&ksp->napi)) { - unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN); - unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); - /*disable rx interrupt*/ - status &= ~mask_bit; - writel(status , KS8695_IRQ_VA + KS8695_INTEN); - __napi_schedule(&ksp->napi); + status = readl(KS8695_IRQ_VA + KS8695_INTST); + + /*clean rx status bit*/ + writel(status | mask_bit , KS8695_IRQ_VA + KS8695_INTST); + + if (status & mask_bit) { + if (napi_schedule_prep(&ksp->napi)) { + /*disable rx interrupt*/ + status &= ~mask_bit; + writel(status , KS8695_IRQ_VA + KS8695_INTEN); + __napi_schedule(&ksp->napi); + } } spin_unlock(&ksp->rx_lock); @@ -544,13 +552,14 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget) ksp->next_rx_desc_read = (last_rx_processed + 1) & MAX_RX_DESC_MASK; - } - /* And refill the buffers */ - ks8695_refill_rxbuffers(ksp); - /* Kick the RX DMA engine, in case it became - * suspended */ - ks8695_writereg(ksp, KS8695_DRSC, 0); + /* And refill the buffers */ + ks8695_refill_rxbuffers(ksp); + + /* Kick the RX DMA engine, in case it became + * suspended */ + ks8695_writereg(ksp, KS8695_DRSC, 0); + } return received; } diff --git a/trunk/drivers/net/arm/w90p910_ether.c b/trunk/drivers/net/arm/w90p910_ether.c index b7f3866d546f..25e2627eb118 100644 --- a/trunk/drivers/net/arm/w90p910_ether.c +++ b/trunk/drivers/net/arm/w90p910_ether.c @@ -160,8 +160,8 @@ struct w90p910_ether { struct mii_if_info 
mii; struct timer_list check_timer; void __iomem *reg; - int rxirq; - int txirq; + unsigned int rxirq; + unsigned int txirq; unsigned int cur_tx; unsigned int cur_rx; unsigned int finish_tx; diff --git a/trunk/drivers/net/atl1c/atl1c_main.c b/trunk/drivers/net/atl1c/atl1c_main.c index 1e2f57d4c367..5ef9e23435f4 100644 --- a/trunk/drivers/net/atl1c/atl1c_main.c +++ b/trunk/drivers/net/atl1c/atl1c_main.c @@ -2135,7 +2135,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter) if (!adapter->have_msi) flags |= IRQF_SHARED; - err = request_irq(adapter->pdev->irq, atl1c_intr, flags, + err = request_irq(adapter->pdev->irq, &atl1c_intr, flags, netdev->name, netdev); if (err) { if (netif_msg_ifup(adapter)) diff --git a/trunk/drivers/net/bnx2x.h b/trunk/drivers/net/bnx2x.h index 602ab86b6392..928942b74ce6 100644 --- a/trunk/drivers/net/bnx2x.h +++ b/trunk/drivers/net/bnx2x.h @@ -259,6 +259,9 @@ struct bnx2x_eth_q_stats { struct bnx2x_fastpath { struct napi_struct napi; + + u8 is_rx_queue; + struct host_status_block *status_blk; dma_addr_t status_blk_mapping; @@ -967,7 +970,8 @@ struct bnx2x { #define BNX2X_STATE_ERROR 0xf000 int multi_mode; - int num_queues; + int num_rx_queues; + int num_tx_queues; u32 rx_mode; #define BNX2X_RX_MODE_NONE 0 @@ -1070,15 +1074,20 @@ struct bnx2x { }; -#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \ - : MAX_CONTEXT) -#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) -#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) +#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/(2 * E1HVN_MAX)) \ + : (MAX_CONTEXT/2)) +#define BNX2X_NUM_QUEUES(bp) (bp->num_rx_queues + bp->num_tx_queues) +#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 2) +#define for_each_rx_queue(bp, var) \ + for (var = 0; var < bp->num_rx_queues; var++) +#define for_each_tx_queue(bp, var) \ + for (var = bp->num_rx_queues; \ + var < BNX2X_NUM_QUEUES(bp); var++) #define for_each_queue(bp, var) \ for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) #define for_each_nondefault_queue(bp, var) \ - for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) + for (var = 1; var < bp->num_rx_queues; var++) void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); diff --git a/trunk/drivers/net/bnx2x_main.c b/trunk/drivers/net/bnx2x_main.c index 77ba13520d87..e2cf686d1118 100644 --- a/trunk/drivers/net/bnx2x_main.c +++ b/trunk/drivers/net/bnx2x_main.c @@ -57,7 +57,7 @@ #include "bnx2x_init_ops.h" #include "bnx2x_dump.h" -#define DRV_MODULE_VERSION "1.52.1-5" +#define DRV_MODULE_VERSION "1.52.1-4" #define DRV_MODULE_RELDATE "2009/11/09" #define BNX2X_BC_VER 0x040200 @@ -91,10 +91,15 @@ module_param(multi_mode, int, 0); MODULE_PARM_DESC(multi_mode, " Multi queue mode " "(0 Disable; 1 Enable (default))"); -static int num_queues; -module_param(num_queues, int, 0); -MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" - " (default is as a number of CPUs)"); +static int num_rx_queues; +module_param(num_rx_queues, int, 0); +MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1" + " (default is half number of CPUs)"); + +static int num_tx_queues; +module_param(num_tx_queues, int, 0); +MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1" + " (default is half number of CPUs)"); static int disable_tpa; module_param(disable_tpa, int, 0); @@ -553,7 +558,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); /* Rx */ - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; 
BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)" @@ -570,7 +575,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) } /* Tx */ - for_each_queue(bp, i) { + for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" @@ -585,7 +590,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) /* Rings */ /* Rx */ - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); @@ -619,7 +624,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) } /* Tx */ - for_each_queue(bp, i) { + for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); @@ -787,13 +792,21 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, barrier(); } -static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) +static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) { struct host_status_block *fpsb = fp->status_blk; + u16 rc = 0; barrier(); /* status block is written to by the chip */ - fp->fp_c_idx = fpsb->c_status_block.status_block_index; - fp->fp_u_idx = fpsb->u_status_block.status_block_index; + if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) { + fp->fp_c_idx = fpsb->c_status_block.status_block_index; + rc |= 1; + } + if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) { + fp->fp_u_idx = fpsb->u_status_block.status_block_index; + rc |= 2; + } + return rc; } static u16 bnx2x_ack_int(struct bnx2x *bp) @@ -833,9 +846,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; int nbd; - /* prefetch skb end pointer to speedup dev_kfree_skb() */ - prefetch(&skb->end); - DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", idx, tx_buf, skb); @@ -880,7 +890,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* release skb */ WARN_ON(!skb); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; @@ -910,28 +920,19 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) return (s16)(fp->bp->tx_ring_size) - used; } -static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) -{ - u16 hw_cons; - - /* Tell compiler that status block fields can change */ - barrier(); - hw_cons = le16_to_cpu(*fp->tx_cons_sb); - return hw_cons != fp->tx_pkt_cons; -} - -static int bnx2x_tx_int(struct bnx2x_fastpath *fp) +static void bnx2x_tx_int(struct bnx2x_fastpath *fp) { struct bnx2x *bp = fp->bp; struct netdev_queue *txq; u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; + int done = 0; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) - return -1; + return; #endif - txq = netdev_get_tx_queue(bp->dev, fp->index); + txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues); hw_cons = le16_to_cpu(*fp->tx_cons_sb); sw_cons = fp->tx_pkt_cons; @@ -952,6 +953,7 @@ static int bnx2x_tx_int(struct bnx2x_fastpath *fp) */ bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); sw_cons++; + done++; } fp->tx_pkt_cons = sw_cons; @@ -973,7 +975,6 @@ static int bnx2x_tx_int(struct bnx2x_fastpath *fp) (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) netif_tx_wake_queue(txq); } - return 0; } #ifdef BCM_CNIC @@ -1560,8 +1561,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) } else { rx_buf = &fp->rx_buf_ring[bd_cons]; skb = rx_buf->skb; - prefetch(skb); - prefetch((u8 *)skb + 256); len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); pad = cqe->fast_path_cqe.placement_offset; @@ -1743,13 
+1742,27 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) if (unlikely(bp->panic)) return IRQ_HANDLED; #endif + /* Handle Rx or Tx according to MSI-X vector */ + if (fp->is_rx_queue) { + prefetch(fp->rx_cons_sb); + prefetch(&fp->status_blk->u_status_block.status_block_index); + + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + + } else { + prefetch(fp->tx_cons_sb); + prefetch(&fp->status_blk->c_status_block.status_block_index); + + bnx2x_update_fpsb_idx(fp); + rmb(); + bnx2x_tx_int(fp); - /* Handle Rx and Tx according to MSI-X vector */ - prefetch(fp->rx_cons_sb); - prefetch(fp->tx_cons_sb); - prefetch(&fp->status_blk->u_status_block.status_block_index); - prefetch(&fp->status_blk->c_status_block.status_block_index); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + /* Re-enable interrupts */ + bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, + le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, + le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); + } return IRQ_HANDLED; } @@ -1784,14 +1797,31 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) mask = 0x2 << fp->sb_id; if (status & mask) { - /* Handle Rx and Tx according to SB id */ - prefetch(fp->rx_cons_sb); - prefetch(&fp->status_blk->u_status_block. - status_block_index); - prefetch(fp->tx_cons_sb); - prefetch(&fp->status_blk->c_status_block. - status_block_index); - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + /* Handle Rx or Tx according to SB id */ + if (fp->is_rx_queue) { + prefetch(fp->rx_cons_sb); + prefetch(&fp->status_blk->u_status_block. + status_block_index); + + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + + } else { + prefetch(fp->tx_cons_sb); + prefetch(&fp->status_blk->c_status_block. + status_block_index); + + bnx2x_update_fpsb_idx(fp); + rmb(); + bnx2x_tx_int(fp); + + /* Re-enable interrupts */ + bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, + le16_to_cpu(fp->fp_u_idx), + IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, + le16_to_cpu(fp->fp_c_idx), + IGU_INT_ENABLE, 1); + } status &= ~mask; } } @@ -2557,6 +2587,7 @@ static void bnx2x_e1h_disable(struct bnx2x *bp) int port = BP_PORT(bp); netif_tx_disable(bp->dev); + bp->dev->trans_start = jiffies; /* prevent tx timeout */ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); @@ -3996,7 +4027,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) estats->no_buff_discard_hi = 0; estats->no_buff_discard_lo = 0; - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; int cl_id = fp->cl_id; struct tstorm_per_client_stats *tclient = @@ -4213,7 +4244,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); nstats->rx_dropped = estats->mac_discard; - for_each_queue(bp, i) + for_each_rx_queue(bp, i) nstats->rx_dropped += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); @@ -4267,7 +4298,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) estats->rx_err_discard_pkt = 0; estats->rx_skb_alloc_failed = 0; estats->hw_csum_err = 0; - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; estats->driver_xoff += qstats->driver_xoff; @@ -4298,7 +4329,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) if (bp->msglevel & NETIF_MSG_TIMER) { struct bnx2x_fastpath *fp0_rx = bp->fp; - struct bnx2x_fastpath *fp0_tx = bp->fp; + struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]); struct tstorm_per_client_stats *old_tclient = &bp->fp->old_tclient; struct 
bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; @@ -4953,7 +4984,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) if (bp->flags & TPA_ENABLE_FLAG) { - for_each_queue(bp, j) { + for_each_rx_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; for (i = 0; i < max_agg_queues; i++) { @@ -4976,13 +5007,16 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) } } - for_each_queue(bp, j) { + for_each_rx_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; fp->rx_bd_cons = 0; fp->rx_cons_sb = BNX2X_RX_SB_INDEX; fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; + /* Mark queue as Rx */ + fp->is_rx_queue = 1; + /* "next page" elements initialization */ /* SGE ring */ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { @@ -5088,7 +5122,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) { int i, j; - for_each_queue(bp, j) { + for_each_tx_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; for (i = 1; i <= NUM_TX_RINGS; i++) { @@ -5114,6 +5148,10 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp) fp->tx_cons_sb = BNX2X_TX_SB_INDEX; fp->tx_pkt = 0; } + + /* clean tx statistics */ + for_each_rx_queue(bp, i) + bnx2x_fp(bp, i, tx_pkt) = 0; } static void bnx2x_init_sp_ring(struct bnx2x *bp) @@ -5142,8 +5180,7 @@ static void bnx2x_init_context(struct bnx2x *bp) { int i; - /* Rx */ - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { struct eth_context *context = bnx2x_sp(bp, context[i].eth); struct bnx2x_fastpath *fp = &bp->fp[i]; u8 cl_id = fp->cl_id; @@ -5195,11 +5232,10 @@ static void bnx2x_init_context(struct bnx2x *bp) ETH_CONNECTION_TYPE); } - /* Tx */ - for_each_queue(bp, i) { + for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; struct eth_context *context = - bnx2x_sp(bp, context[i].eth); + bnx2x_sp(bp, context[i - bp->num_rx_queues].eth); context->cstorm_st_context.sb_index_number = C_SB_ETH_TX_CQ_INDEX; @@ -5227,7 +5263,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, - bp->fp->cl_id + (i % bp->num_queues)); + bp->fp->cl_id + (i % bp->num_rx_queues)); } static void bnx2x_set_client_config(struct bnx2x *bp) @@ -5471,7 +5507,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE), (u32)0xffff); - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; REG_WR(bp, BAR_USTRORM_INTMEM + @@ -5506,7 +5542,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) rx_pause.cqe_thr_high = 350; rx_pause.sge_thr_high = 0; - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; if (!fp->disable_tpa) { @@ -5601,6 +5637,9 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) #else fp->sb_id = fp->cl_id; #endif + /* Suitable Rx and Tx SBs are served by the same client */ + if (i >= bp->num_rx_queues) + fp->cl_id -= bp->num_rx_queues; DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", i, bp, fp->status_blk, fp->cl_id, fp->sb_id); @@ -6710,7 +6749,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) sizeof(struct host_status_block)); } /* Rx */ - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { /* fastpath rx rings: rx_buf rx_desc rx_comp */ BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); @@ -6730,7 +6769,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); } /* Tx */ - for_each_queue(bp, i) { + for_each_tx_queue(bp, i) { /* fastpath tx rings: tx_buf tx_desc 
*/ BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); @@ -6792,7 +6831,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) sizeof(struct host_status_block)); } /* Rx */ - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { /* fastpath rx rings: rx_buf rx_desc rx_comp */ BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), @@ -6814,7 +6853,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); } /* Tx */ - for_each_queue(bp, i) { + for_each_tx_queue(bp, i) { /* fastpath tx rings: tx_buf tx_desc */ BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), @@ -6870,7 +6909,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) { int i; - for_each_queue(bp, i) { + for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; u16 bd_cons = fp->tx_bd_cons; @@ -6888,7 +6927,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) { int i, j; - for_each_queue(bp, j) { + for_each_rx_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; for (i = 0; i < NUM_RX_BD; i++) { @@ -7003,8 +7042,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) #endif for_each_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; - snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", - bp->dev->name, i); + + if (i < bp->num_rx_queues) + sprintf(fp->name, "%s-rx-%d", bp->dev->name, i); + else + sprintf(fp->name, "%s-tx-%d", + bp->dev->name, i - bp->num_rx_queues); rc = request_irq(bp->msix_table[i + offset].vector, bnx2x_msix_fp_int, 0, fp->name, fp); @@ -7063,7 +7106,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp) { int i; - for_each_queue(bp, i) + for_each_rx_queue(bp, i) napi_enable(&bnx2x_fp(bp, i, napi)); } @@ -7071,7 +7114,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - for_each_queue(bp, i) + for_each_rx_queue(bp, i) napi_disable(&bnx2x_fp(bp, i, napi)); } @@ -7097,6 +7140,7 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) bnx2x_int_disable_sync(bp, disable_hw); bnx2x_napi_disable(bp); netif_tx_disable(bp->dev); + bp->dev->trans_start = jiffies; /* prevent tx timeout */ } /* @@ -7366,60 +7410,88 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index) static int bnx2x_poll(struct napi_struct *napi, int budget); -static void bnx2x_set_num_queues_msix(struct bnx2x *bp) +static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out, + int *num_tx_queues_out) { + int _num_rx_queues = 0, _num_tx_queues = 0; switch (bp->multi_mode) { case ETH_RSS_MODE_DISABLED: - bp->num_queues = 1; + _num_rx_queues = 1; + _num_tx_queues = 1; break; case ETH_RSS_MODE_REGULAR: - if (num_queues) - bp->num_queues = min_t(u32, num_queues, - BNX2X_MAX_QUEUES(bp)); + if (num_rx_queues) + _num_rx_queues = min_t(u32, num_rx_queues, + BNX2X_MAX_QUEUES(bp)); + else + _num_rx_queues = min_t(u32, num_online_cpus(), + BNX2X_MAX_QUEUES(bp)); + + if (num_tx_queues) + _num_tx_queues = min_t(u32, num_tx_queues, + BNX2X_MAX_QUEUES(bp)); else - bp->num_queues = min_t(u32, num_online_cpus(), - BNX2X_MAX_QUEUES(bp)); + _num_tx_queues = min_t(u32, num_online_cpus(), + BNX2X_MAX_QUEUES(bp)); + + /* There must be not more Tx queues than Rx queues */ + if (_num_tx_queues > _num_rx_queues) { + BNX2X_ERR("number of tx queues (%d) > " + "number of rx queues (%d)" + " defaulting to %d\n", + _num_tx_queues, _num_rx_queues, + _num_rx_queues); + _num_tx_queues = _num_rx_queues; + } break; default: - bp->num_queues = 1; + _num_rx_queues = 1; + _num_tx_queues = 1; break; } + + *num_rx_queues_out = _num_rx_queues; + *num_tx_queues_out = _num_tx_queues; } -static int bnx2x_set_num_queues(struct bnx2x *bp) +static int 
bnx2x_set_int_mode(struct bnx2x *bp) { int rc = 0; switch (int_mode) { case INT_MODE_INTx: case INT_MODE_MSI: - bp->num_queues = 1; + bp->num_rx_queues = 1; + bp->num_tx_queues = 1; DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); break; case INT_MODE_MSIX: default: - /* Set number of queues according to bp->multi_mode value */ - bnx2x_set_num_queues_msix(bp); + /* Set interrupt mode according to bp->multi_mode value */ + bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues, + &bp->num_tx_queues); - DP(NETIF_MSG_IFUP, "set number of queues to %d\n", - bp->num_queues); + DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n", + bp->num_rx_queues, bp->num_tx_queues); /* if we can't use MSI-X we only need one fp, * so try to enable MSI-X with the requested number of fp's * and fallback to MSI or legacy INTx with one fp */ rc = bnx2x_enable_msix(bp); - if (rc) + if (rc) { /* failed to enable MSI-X */ - bp->num_queues = 1; + bp->num_rx_queues = 1; + bp->num_tx_queues = 1; + } break; } - bp->dev->real_num_tx_queues = bp->num_queues; + bp->dev->real_num_tx_queues = bp->num_tx_queues; return rc; } @@ -7441,16 +7513,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; - rc = bnx2x_set_num_queues(bp); + rc = bnx2x_set_int_mode(bp); if (bnx2x_alloc_mem(bp)) return -ENOMEM; - for_each_queue(bp, i) + for_each_rx_queue(bp, i) bnx2x_fp(bp, i, disable_tpa) = ((bp->flags & TPA_ENABLE_FLAG) == 0); - for_each_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, 128); @@ -7464,7 +7536,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } } else { /* Fall to INTx if failed to enable MSI-X due to lack of - memory (in bnx2x_set_num_queues()) */ + memory (in bnx2x_set_int_mode()) */ if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) bnx2x_enable_msi(bp); bnx2x_ack_int(bp); @@ -7658,14 +7730,14 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->port.pmf = 0; /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); - for_each_queue(bp, i) + for_each_rx_queue(bp, i) bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); load_error2: /* Release IRQs */ bnx2x_free_irq(bp); load_error1: bnx2x_napi_disable(bp); - for_each_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); bnx2x_free_mem(bp); @@ -7856,7 +7928,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) bnx2x_free_irq(bp); /* Wait until tx fastpath tasks complete */ - for_each_queue(bp, i) { + for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; cnt = 1000; @@ -7999,9 +8071,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); - for_each_queue(bp, i) + for_each_rx_queue(bp, i) bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - for_each_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); bnx2x_free_mem(bp); @@ -10197,7 +10269,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) struct sk_buff *skb; unsigned char *packet; struct bnx2x_fastpath *fp_rx = &bp->fp[0]; - struct bnx2x_fastpath *fp_tx = &bp->fp[0]; + struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues]; u16 tx_start_idx, tx_idx; u16 rx_start_idx, rx_idx; u16 pkt_prod, bd_prod; @@ -10274,12 +10346,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) fp_tx->tx_db.data.prod += 2; barrier(); - DOORBELL(bp, fp_tx->index, 
fp_tx->tx_db.raw); + DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw); mmiowb(); num_pkts++; fp_tx->tx_bd_prod += 2; /* start + pbd */ + bp->dev->trans_start = jiffies; udelay(100); @@ -10652,7 +10725,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) switch(stringset) { case ETH_SS_STATS: if (is_multi(bp)) { - num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; + num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues; if (!IS_E1HMF_MODE_STAT(bp)) num_stats += BNX2X_NUM_STATS; } else { @@ -10683,7 +10756,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) case ETH_SS_STATS: if (is_multi(bp)) { k = 0; - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { for (j = 0; j < BNX2X_NUM_Q_STATS; j++) sprintf(buf + (k + j)*ETH_GSTRING_LEN, bnx2x_q_stats_arr[j].string, i); @@ -10720,7 +10793,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, if (is_multi(bp)) { k = 0; - for_each_queue(bp, i) { + for_each_rx_queue(bp, i) { hw_stats = (u32 *)&bp->fp[i].eth_q_stats; for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { if (bnx2x_q_stats_arr[j].size == 0) { @@ -10916,60 +10989,54 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) static int bnx2x_poll(struct napi_struct *napi, int budget) { - int work_done = 0; struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, napi); struct bnx2x *bp = fp->bp; + int work_done = 0; - while (1) { #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - napi_complete(napi); - return 0; - } + if (unlikely(bp->panic)) + goto poll_panic; #endif - if (bnx2x_has_tx_work(fp)) - bnx2x_tx_int(fp); + prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); + prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); - if (bnx2x_has_rx_work(fp)) { - work_done += bnx2x_rx_int(fp, budget - work_done); + bnx2x_update_fpsb_idx(fp); - /* must not complete if we consumed full budget */ - if (work_done >= budget) - break; - } + if (bnx2x_has_rx_work(fp)) { + work_done = bnx2x_rx_int(fp, budget); - /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - bnx2x_update_fpsb_idx(fp); - /* bnx2x_has_rx_work() reads the status block, thus we need - * to ensure that status block indices have been actually read - * (bnx2x_update_fpsb_idx) prior to this check - * (bnx2x_has_rx_work) so that we won't write the "newer" - * value of the status block to IGU (if there was a DMA right - * after bnx2x_has_rx_work and if there is no rmb, the memory - * reading (bnx2x_update_fpsb_idx) may be postponed to right - * before bnx2x_ack_sb). In this case there will never be - * another interrupt until there is another update of the - * status block, while there is still unhandled work. 
- */ - rmb(); + /* must not complete if we consumed full budget */ + if (work_done >= budget) + goto poll_again; + } - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { - napi_complete(napi); - /* Re-enable interrupts */ - bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, - le16_to_cpu(fp->fp_c_idx), - IGU_INT_NOP, 1); - bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, - le16_to_cpu(fp->fp_u_idx), - IGU_INT_ENABLE, 1); - break; - } - } + /* bnx2x_has_rx_work() reads the status block, thus we need to + * ensure that status block indices have been actually read + * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work) + * so that we won't write the "newer" value of the status block to IGU + * (if there was a DMA right after bnx2x_has_rx_work and + * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx) + * may be postponed to right before bnx2x_ack_sb). In this case + * there will never be another interrupt until there is another update + * of the status block, while there is still unhandled work. + */ + rmb(); + + if (!bnx2x_has_rx_work(fp)) { +#ifdef BNX2X_STOP_ON_ERROR +poll_panic: +#endif + napi_complete(napi); + + bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, + le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, + le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); } +poll_again: return work_done; } @@ -11154,7 +11221,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - struct bnx2x_fastpath *fp; + struct bnx2x_fastpath *fp, *fp_stat; struct netdev_queue *txq; struct sw_tx_bd *tx_buf; struct eth_tx_start_bd *tx_start_bd; @@ -11176,10 +11243,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) fp_index = skb_get_queue_mapping(skb); txq = netdev_get_tx_queue(dev, fp_index); - fp = &bp->fp[fp_index]; + fp = &bp->fp[fp_index + bp->num_rx_queues]; + fp_stat = &bp->fp[fp_index]; if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { - fp->eth_q_stats.driver_xoff++; + fp_stat->eth_q_stats.driver_xoff++; netif_tx_stop_queue(txq); BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); return NETDEV_TX_BUSY; @@ -11405,7 +11473,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) fp->tx_db.data.prod += nbd; barrier(); - DOORBELL(bp, fp->index, fp->tx_db.raw); + DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw); mmiowb(); @@ -11416,11 +11484,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* We want bnx2x_tx_int to "see" the updated tx_bd_prod if we put Tx into XOFF state. 
*/ smp_mb(); - fp->eth_q_stats.driver_xoff++; + fp_stat->eth_q_stats.driver_xoff++; if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) netif_tx_wake_queue(txq); } - fp->tx_pkt++; + fp_stat->tx_pkt++; return NETDEV_TX_OK; } @@ -12308,9 +12376,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); - for_each_queue(bp, i) + for_each_rx_queue(bp, i) bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - for_each_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); bnx2x_free_mem(bp); diff --git a/trunk/drivers/net/bonding/bond_3ad.c b/trunk/drivers/net/bonding/bond_3ad.c index 88c3fe80b355..1d0581923287 100644 --- a/trunk/drivers/net/bonding/bond_3ad.c +++ b/trunk/drivers/net/bonding/bond_3ad.c @@ -445,48 +445,6 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par) // ================= ad_rx_machine helper functions ================== ///////////////////////////////////////////////////////////////////////////////// -/** - * __choose_matched - update a port's matched variable from a received lacpdu - * @lacpdu: the lacpdu we've received - * @port: the port we're looking at - * - * Update the value of the matched variable, using parameter values from a - * newly received lacpdu. Parameter values for the partner carried in the - * received PDU are compared with the corresponding operational parameter - * values for the actor. Matched is set to TRUE if all of these parameters - * match and the PDU parameter partner_state.aggregation has the same value as - * actor_oper_port_state.aggregation and lacp will actively maintain the link - * in the aggregation. Matched is also set to TRUE if the value of - * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates - * an individual link and lacp will actively maintain the link. Otherwise, - * matched is set to FALSE. LACP is considered to be actively maintaining the - * link if either the PDU's actor_state.lacp_activity variable is TRUE or both - * the actor's actor_oper_port_state.lacp_activity and the PDU's - * partner_state.lacp_activity variables are TRUE. - * - * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is - * used here to implement the language from 802.3ad 43.4.9 that requires - * recordPDU to "match" the LACPDU parameters to the stored values. 
- */ -static void __choose_matched(struct lacpdu *lacpdu, struct port *port) -{ - // check if all parameters are alike - if (((ntohs(lacpdu->partner_port) == port->actor_port_number) && - (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) && - !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) && - (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) && - (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) && - ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) || - // or this is individual link(aggregation == FALSE) - ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0) - ) { - // update the state machine Matched variable - port->sm_vars |= AD_PORT_MATCHED; - } else { - port->sm_vars &= ~AD_PORT_MATCHED; - } -} - /** * __record_pdu - record parameters from a received lacpdu * @lacpdu: the lacpdu we've received @@ -501,7 +459,6 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port) if (lacpdu && port) { struct port_params *partner = &port->partner_oper; - __choose_matched(lacpdu, port); // record the new parameter values for the partner operational partner->port_number = ntohs(lacpdu->actor_port); partner->port_priority = ntohs(lacpdu->actor_port_priority); @@ -605,6 +562,47 @@ static void __update_default_selected(struct port *port) } } +/** + * __choose_matched - update a port's matched variable from a received lacpdu + * @lacpdu: the lacpdu we've received + * @port: the port we're looking at + * + * Update the value of the matched variable, using parameter values from a + * newly received lacpdu. Parameter values for the partner carried in the + * received PDU are compared with the corresponding operational parameter + * values for the actor. Matched is set to TRUE if all of these parameters + * match and the PDU parameter partner_state.aggregation has the same value as + * actor_oper_port_state.aggregation and lacp will actively maintain the link + * in the aggregation. Matched is also set to TRUE if the value of + * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates + * an individual link and lacp will actively maintain the link. Otherwise, + * matched is set to FALSE. LACP is considered to be actively maintaining the + * link if either the PDU's actor_state.lacp_activity variable is TRUE or both + * the actor's actor_oper_port_state.lacp_activity and the PDU's + * partner_state.lacp_activity variables are TRUE. 
+ */ +static void __choose_matched(struct lacpdu *lacpdu, struct port *port) +{ + // validate lacpdu and port + if (lacpdu && port) { + // check if all parameters are alike + if (((ntohs(lacpdu->partner_port) == port->actor_port_number) && + (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) && + !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) && + (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) && + (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) && + ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) || + // or this is individual link(aggregation == FALSE) + ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0) + ) { + // update the state machine Matched variable + port->sm_vars |= AD_PORT_MATCHED; + } else { + port->sm_vars &= ~AD_PORT_MATCHED; + } + } +} + /** * __update_ntt - update a port's ntt variable from a received lacpdu * @lacpdu: the lacpdu we've received @@ -1136,6 +1134,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) __update_selected(lacpdu, port); __update_ntt(lacpdu, port); __record_pdu(lacpdu, port); + __choose_matched(lacpdu, port); port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)); port->actor_oper_port_state &= ~AD_STATE_EXPIRED; // verify that if the aggregator is enabled, the port is enabled too. diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 726bd755338f..ecea6c294132 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -158,7 +158,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the static const char * const version = DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; -int bond_net_id __read_mostly; +int bond_net_id; static __be32 arp_target[BOND_MAX_ARP_TARGETS]; static int arp_ip_count; diff --git a/trunk/drivers/net/can/Kconfig b/trunk/drivers/net/can/Kconfig index bb803fa1e6a7..b819cc2a429e 100644 --- a/trunk/drivers/net/can/Kconfig +++ b/trunk/drivers/net/can/Kconfig @@ -35,9 +35,63 @@ config CAN_CALC_BITTIMING arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw". If unsure, say Y. +config CAN_SJA1000 + depends on CAN_DEV && HAS_IOMEM + tristate "Philips SJA1000" + ---help--- + Driver for the SJA1000 CAN controllers from Philips or NXP + +config CAN_SJA1000_ISA + depends on CAN_SJA1000 && ISA + tristate "ISA Bus based legacy SJA1000 driver" + ---help--- + This driver adds legacy support for SJA1000 chips connected to + the ISA bus using I/O port, memory mapped or indirect access. + +config CAN_SJA1000_PLATFORM + depends on CAN_SJA1000 + tristate "Generic Platform Bus based SJA1000 driver" + ---help--- + This driver adds support for the SJA1000 chips connected to + the "platform bus" (Linux abstraction for directly to the + processor attached devices). Which can be found on various + boards from Phytec (http://www.phytec.de) like the PCM027, + PCM038. + +config CAN_SJA1000_OF_PLATFORM + depends on CAN_SJA1000 && PPC_OF + tristate "Generic OF Platform Bus based SJA1000 driver" + ---help--- + This driver adds support for the SJA1000 chips connected to + the OpenFirmware "platform bus" found on embedded systems with + OpenFirmware bindings, e.g. if you have a PowerPC based system + you may want to enable this option. 
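As a minimal sketch of how the flattened options above and below fit together (using only symbols visible in this hunk, and assuming the usual tristate defaults), a .config fragment enabling an EMS CPC-PCI card would select the core CAN symbols first and then the SJA1000 bus front end:

    CONFIG_CAN=m
    CONFIG_CAN_DEV=m
    CONFIG_CAN_SJA1000=m
    CONFIG_CAN_EMS_PCI=m

The same chain applies to the Kvaser and ISA variants declared in this hunk, each of which depends on CAN_SJA1000 directly rather than living in a separate sub-menu.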
+ +config CAN_EMS_PCI + tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card" + depends on PCI && CAN_SJA1000 + ---help--- + This driver is for the one, two or four channel CPC-PCI, + CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche + (http://www.ems-wuensche.de). + +config CAN_EMS_USB + tristate "EMS CPC-USB/ARM7 CAN/USB interface" + depends on USB && CAN_DEV + ---help--- + This driver is for the one channel CPC-USB/ARM7 CAN/USB interface + from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). + +config CAN_KVASER_PCI + tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards" + depends on PCI && CAN_SJA1000 + ---help--- + This driver is for the the PCIcanx and PCIcan cards (1, 2 or + 4 channel) from Kvaser (http://www.kvaser.com). + config CAN_AT91 tristate "Atmel AT91 onchip CAN controller" - depends on CAN_DEV && ARCH_AT91SAM9263 + depends on CAN && CAN_DEV && ARCH_AT91SAM9263 ---help--- This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. @@ -54,12 +108,6 @@ config CAN_MCP251X ---help--- Driver for the Microchip MCP251x SPI CAN controllers. -source "drivers/net/can/mscan/Kconfig" - -source "drivers/net/can/sja1000/Kconfig" - -source "drivers/net/can/usb/Kconfig" - config CAN_DEBUG_DEVICES bool "CAN devices debugging messages" depends on CAN diff --git a/trunk/drivers/net/can/Makefile b/trunk/drivers/net/can/Makefile index 56899fef1c6a..14891817ea5b 100644 --- a/trunk/drivers/net/can/Makefile +++ b/trunk/drivers/net/can/Makefile @@ -10,7 +10,6 @@ can-dev-y := dev.o obj-y += usb/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ -obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o diff --git a/trunk/drivers/net/can/dev.c b/trunk/drivers/net/can/dev.c index c1bb29f0322b..26c89aaeba62 100644 --- a/trunk/drivers/net/can/dev.c +++ b/trunk/drivers/net/can/dev.c @@ -677,11 +677,6 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) return -EMSGSIZE; } -static size_t can_get_xstats_size(const struct net_device *dev) -{ - return sizeof(struct can_device_stats); -} - static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -710,7 +705,6 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { .changelink = can_changelink, .get_size = can_get_size, .fill_info = can_fill_info, - .get_xstats_size = can_get_xstats_size, .fill_xstats = can_fill_xstats, }; diff --git a/trunk/drivers/net/can/mcp251x.c b/trunk/drivers/net/can/mcp251x.c index 78b1b69b2921..8f48f4b50b7c 100644 --- a/trunk/drivers/net/can/mcp251x.c +++ b/trunk/drivers/net/can/mcp251x.c @@ -594,7 +594,13 @@ static int mcp251x_do_set_bittiming(struct net_device *net) static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv, struct spi_device *spi) { - mcp251x_do_set_bittiming(net); + int ret; + + ret = open_candev(net); + if (ret) { + dev_err(&spi->dev, "unable to set initial baudrate!\n"); + return ret; + } /* Enable RX0->RX1 buffer roll over and disable filters */ mcp251x_write_bits(spi, RXBCTRL(0), @@ -665,12 +671,6 @@ static int mcp251x_open(struct net_device *net) struct mcp251x_platform_data *pdata = spi->dev.platform_data; int ret; - ret = open_candev(net); - if (ret) { - dev_err(&spi->dev, "unable to set initial baudrate!\n"); - return ret; - } - if (pdata->transceiver_enable) pdata->transceiver_enable(1); @@ -684,7 +684,6 @@ static int mcp251x_open(struct net_device *net) dev_err(&spi->dev, "failed to acquire 
irq %d\n", spi->irq); if (pdata->transceiver_enable) pdata->transceiver_enable(0); - close_candev(net); return ret; } @@ -693,10 +692,8 @@ static int mcp251x_open(struct net_device *net) ret = mcp251x_setup(net, priv, spi); if (ret) { free_irq(spi->irq, net); - mcp251x_hw_sleep(spi); if (pdata->transceiver_enable) pdata->transceiver_enable(0); - close_candev(net); return ret; } mcp251x_set_normal_mode(spi); @@ -959,6 +956,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi) priv->can.bittiming_const = &mcp251x_bittiming_const; priv->can.do_set_mode = mcp251x_do_set_mode; priv->can.clock.freq = pdata->oscillator_frequency / 2; + priv->can.do_set_bittiming = mcp251x_do_set_bittiming; priv->net = net; dev_set_drvdata(&spi->dev, priv); diff --git a/trunk/drivers/net/can/mscan/Kconfig b/trunk/drivers/net/can/mscan/Kconfig deleted file mode 100644 index cd0f2d6f375d..000000000000 --- a/trunk/drivers/net/can/mscan/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ -config CAN_MSCAN - depends on CAN_DEV && (PPC || M68K || M68KNOMMU) - tristate "Support for Freescale MSCAN based chips" - ---help--- - The Motorola Scalable Controller Area Network (MSCAN) definition - is based on the MSCAN12 definition which is the specific - implementation of the Motorola Scalable CAN concept targeted for - the Motorola MC68HC12 Microcontroller Family. - -if CAN_MSCAN - -config CAN_MPC5XXX - tristate "Freescale MPC5xxx onboard CAN controller" - depends on PPC_MPC52xx - ---help--- - If you say yes here you get support for Freescale's MPC5xxx - onboard CAN controller. - - This driver can also be built as a module. If so, the module - will be called mscan-mpc5xxx.ko. - -endif - diff --git a/trunk/drivers/net/can/mscan/Makefile b/trunk/drivers/net/can/mscan/Makefile deleted file mode 100644 index c9fab17cd8b4..000000000000 --- a/trunk/drivers/net/can/mscan/Makefile +++ /dev/null @@ -1,5 +0,0 @@ - -obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o -mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/trunk/drivers/net/can/mscan/mpc5xxx_can.c b/trunk/drivers/net/can/mscan/mpc5xxx_can.c deleted file mode 100644 index 1de6f6349b16..000000000000 --- a/trunk/drivers/net/can/mscan/mpc5xxx_can.c +++ /dev/null @@ -1,259 +0,0 @@ -/* - * CAN bus driver for the Freescale MPC5xxx embedded CPU. - * - * Copyright (C) 2004-2005 Andrey Volkov , - * Varma Electronics Oy - * Copyright (C) 2008-2009 Wolfgang Grandegger - * Copyright (C) 2009 Wolfram Sang, Pengutronix - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mscan.h" - -#define DRV_NAME "mpc5xxx_can" - -static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = { - { .compatible = "fsl,mpc5200-cdm", }, - {} -}; - -/* - * Get frequency of the MSCAN clock source - * - * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK) - * can be selected. According to the MPC5200 user's manual, the oscillator - * clock is the better choice as it has less jitter but due to a hardware - * bug, it can not be selected for the old MPC5200 Rev. A chips. - */ - -static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of, - int clock_src) -{ - unsigned int pvr; - struct mpc52xx_cdm __iomem *cdm; - struct device_node *np_cdm; - unsigned int freq; - u32 val; - - pvr = mfspr(SPRN_PVR); - - freq = mpc5xxx_get_bus_frequency(of->node); - if (!freq) - return 0; - - if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011) - return freq; - - /* Determine SYS_XTAL_IN frequency from the clock domain settings */ - np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids); - if (!np_cdm) { - dev_err(&of->dev, "can't get clock node!\n"); - return 0; - } - cdm = of_iomap(np_cdm, 0); - of_node_put(np_cdm); - - if (in_8(&cdm->ipb_clk_sel) & 0x1) - freq *= 2; - val = in_be32(&cdm->rstcfg); - - freq *= (val & (1 << 5)) ? 8 : 4; - freq /= (val & (1 << 6)) ? 12 : 16; - - iounmap(cdm); - - return freq; -} - -static int __devinit mpc5xxx_can_probe(struct of_device *ofdev, - const struct of_device_id *id) -{ - struct device_node *np = ofdev->node; - struct net_device *dev; - struct mscan_priv *priv; - void __iomem *base; - const char *clk_src; - int err, irq, clock_src; - - base = of_iomap(ofdev->node, 0); - if (!base) { - dev_err(&ofdev->dev, "couldn't ioremap\n"); - err = -ENOMEM; - goto exit_release_mem; - } - - irq = irq_of_parse_and_map(np, 0); - if (!irq) { - dev_err(&ofdev->dev, "no irq found\n"); - err = -ENODEV; - goto exit_unmap_mem; - } - - dev = alloc_mscandev(); - if (!dev) { - err = -ENOMEM; - goto exit_dispose_irq; - } - - priv = netdev_priv(dev); - priv->reg_base = base; - dev->irq = irq; - - /* - * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock - * (IP_CLK) can be selected as MSCAN clock source. According to - * the MPC5200 user's manual, the oscillator clock is the better - * choice as it has less jitter. For this reason, it is selected - * by default. 
- */ - clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL); - if (clk_src && strcmp(clk_src, "ip") == 0) - clock_src = MSCAN_CLKSRC_BUS; - else - clock_src = MSCAN_CLKSRC_XTAL; - priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src); - if (!priv->can.clock.freq) { - dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n"); - err = -ENODEV; - goto exit_free_mscan; - } - - SET_NETDEV_DEV(dev, &ofdev->dev); - - err = register_mscandev(dev, clock_src); - if (err) { - dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", - DRV_NAME, err); - goto exit_free_mscan; - } - - dev_set_drvdata(&ofdev->dev, dev); - - dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", - priv->reg_base, dev->irq, priv->can.clock.freq); - - return 0; - -exit_free_mscan: - free_candev(dev); -exit_dispose_irq: - irq_dispose_mapping(irq); -exit_unmap_mem: - iounmap(base); -exit_release_mem: - return err; -} - -static int __devexit mpc5xxx_can_remove(struct of_device *ofdev) -{ - struct net_device *dev = dev_get_drvdata(&ofdev->dev); - struct mscan_priv *priv = netdev_priv(dev); - - dev_set_drvdata(&ofdev->dev, NULL); - - unregister_mscandev(dev); - iounmap(priv->reg_base); - irq_dispose_mapping(dev->irq); - free_candev(dev); - - return 0; -} - -#ifdef CONFIG_PM -static struct mscan_regs saved_regs; -static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state) -{ - struct net_device *dev = dev_get_drvdata(&ofdev->dev); - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - - _memcpy_fromio(&saved_regs, regs, sizeof(*regs)); - - return 0; -} - -static int mpc5xxx_can_resume(struct of_device *ofdev) -{ - struct net_device *dev = dev_get_drvdata(&ofdev->dev); - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - - regs->canctl0 |= MSCAN_INITRQ; - while (!(regs->canctl1 & MSCAN_INITAK)) - udelay(10); - - regs->canctl1 = saved_regs.canctl1; - regs->canbtr0 = saved_regs.canbtr0; - regs->canbtr1 = saved_regs.canbtr1; - regs->canidac = saved_regs.canidac; - - /* restore masks, buffers etc. 
*/ - _memcpy_toio(®s->canidar1_0, (void *)&saved_regs.canidar1_0, - sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0)); - - regs->canctl0 &= ~MSCAN_INITRQ; - regs->cantbsel = saved_regs.cantbsel; - regs->canrier = saved_regs.canrier; - regs->cantier = saved_regs.cantier; - regs->canctl0 = saved_regs.canctl0; - - return 0; -} -#endif - -static struct of_device_id __devinitdata mpc5xxx_can_table[] = { - {.compatible = "fsl,mpc5200-mscan"}, - {}, -}; - -static struct of_platform_driver mpc5xxx_can_driver = { - .owner = THIS_MODULE, - .name = "mpc5xxx_can", - .probe = mpc5xxx_can_probe, - .remove = __devexit_p(mpc5xxx_can_remove), -#ifdef CONFIG_PM - .suspend = mpc5xxx_can_suspend, - .resume = mpc5xxx_can_resume, -#endif - .match_table = mpc5xxx_can_table, -}; - -static int __init mpc5xxx_can_init(void) -{ - return of_register_platform_driver(&mpc5xxx_can_driver); -} -module_init(mpc5xxx_can_init); - -static void __exit mpc5xxx_can_exit(void) -{ - return of_unregister_platform_driver(&mpc5xxx_can_driver); -}; -module_exit(mpc5xxx_can_exit); - -MODULE_AUTHOR("Wolfgang Grandegger "); -MODULE_DESCRIPTION("Freescale MPC5200 CAN driver"); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/net/can/mscan/mscan.c b/trunk/drivers/net/can/mscan/mscan.c deleted file mode 100644 index bb06dfb58f25..000000000000 --- a/trunk/drivers/net/can/mscan/mscan.c +++ /dev/null @@ -1,668 +0,0 @@ -/* - * CAN bus driver for the alone generic (as possible as) MSCAN controller. - * - * Copyright (C) 2005-2006 Andrey Volkov , - * Varma Electronics Oy - * Copyright (C) 2008-2009 Wolfgang Grandegger - * Copytight (C) 2008-2009 Pengutronix - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mscan.h" - -static struct can_bittiming_const mscan_bittiming_const = { - .name = "mscan", - .tseg1_min = 4, - .tseg1_max = 16, - .tseg2_min = 2, - .tseg2_max = 8, - .sjw_max = 4, - .brp_min = 1, - .brp_max = 64, - .brp_inc = 1, -}; - -struct mscan_state { - u8 mode; - u8 canrier; - u8 cantier; -}; - -static enum can_state state_map[] = { - CAN_STATE_ERROR_ACTIVE, - CAN_STATE_ERROR_WARNING, - CAN_STATE_ERROR_PASSIVE, - CAN_STATE_BUS_OFF -}; - -static int mscan_set_mode(struct net_device *dev, u8 mode) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - int ret = 0; - int i; - u8 canctl1; - - if (mode != MSCAN_NORMAL_MODE) { - if (priv->tx_active) { - /* Abort transfers before going to sleep */# - out_8(®s->cantarq, priv->tx_active); - /* Suppress TX done interrupts */ - out_8(®s->cantier, 0); - } - - canctl1 = in_8(®s->canctl1); - if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) { - setbits8(®s->canctl0, MSCAN_SLPRQ); - for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { - if (in_8(®s->canctl1) & MSCAN_SLPAK) - break; - udelay(100); - } - /* - * The mscan controller will fail to enter sleep mode, - * while there are irregular activities on bus, like - * somebody keeps retransmitting. This behavior is - * undocumented and seems to differ between mscan built - * in mpc5200b and mpc5200. We proceed in that case, - * since otherwise the slprq will be kept set and the - * controller will get stuck. NOTE: INITRQ or CSWAI - * will abort all active transmit actions, if still - * any, at once. - */ - if (i >= MSCAN_SET_MODE_RETRIES) - dev_dbg(dev->dev.parent, - "device failed to enter sleep mode. 
" - "We proceed anyhow.\n"); - else - priv->can.state = CAN_STATE_SLEEPING; - } - - if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) { - setbits8(®s->canctl0, MSCAN_INITRQ); - for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { - if (in_8(®s->canctl1) & MSCAN_INITAK) - break; - } - if (i >= MSCAN_SET_MODE_RETRIES) - ret = -ENODEV; - } - if (!ret) - priv->can.state = CAN_STATE_STOPPED; - - if (mode & MSCAN_CSWAI) - setbits8(®s->canctl0, MSCAN_CSWAI); - - } else { - canctl1 = in_8(®s->canctl1); - if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) { - clrbits8(®s->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); - for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { - canctl1 = in_8(®s->canctl1); - if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK))) - break; - } - if (i >= MSCAN_SET_MODE_RETRIES) - ret = -ENODEV; - else - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - } - return ret; -} - -static int mscan_start(struct net_device *dev) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - u8 canrflg; - int err; - - out_8(®s->canrier, 0); - - INIT_LIST_HEAD(&priv->tx_head); - priv->prev_buf_id = 0; - priv->cur_pri = 0; - priv->tx_active = 0; - priv->shadow_canrier = 0; - priv->flags = 0; - - err = mscan_set_mode(dev, MSCAN_NORMAL_MODE); - if (err) - return err; - - canrflg = in_8(®s->canrflg); - priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; - priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg), - MSCAN_STATE_TX(canrflg))]; - out_8(®s->cantier, 0); - - /* Enable receive interrupts. */ - out_8(®s->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | - MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0); - - return 0; -} - -static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct can_frame *frame = (struct can_frame *)skb->data; - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - int i, rtr, buf_id; - u32 can_id; - - if (frame->can_dlc > 8) - return -EINVAL; - - out_8(®s->cantier, 0); - - i = ~priv->tx_active & MSCAN_TXE; - buf_id = ffs(i) - 1; - switch (hweight8(i)) { - case 0: - netif_stop_queue(dev); - dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n"); - return NETDEV_TX_BUSY; - case 1: - /* - * if buf_id < 3, then current frame will be send out of order, - * since buffer with lower id have higher priority (hell..) 
- */ - netif_stop_queue(dev); - case 2: - if (buf_id < priv->prev_buf_id) { - priv->cur_pri++; - if (priv->cur_pri == 0xff) { - set_bit(F_TX_WAIT_ALL, &priv->flags); - netif_stop_queue(dev); - } - } - set_bit(F_TX_PROGRESS, &priv->flags); - break; - } - priv->prev_buf_id = buf_id; - out_8(®s->cantbsel, i); - - rtr = frame->can_id & CAN_RTR_FLAG; - - /* RTR is always the lowest bit of interest, then IDs follow */ - if (frame->can_id & CAN_EFF_FLAG) { - can_id = (frame->can_id & CAN_EFF_MASK) - << (MSCAN_EFF_RTR_SHIFT + 1); - if (rtr) - can_id |= 1 << MSCAN_EFF_RTR_SHIFT; - out_be16(®s->tx.idr3_2, can_id); - - can_id >>= 16; - /* EFF_FLAGS are inbetween the IDs :( */ - can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0) - | MSCAN_EFF_FLAGS; - } else { - can_id = (frame->can_id & CAN_SFF_MASK) - << (MSCAN_SFF_RTR_SHIFT + 1); - if (rtr) - can_id |= 1 << MSCAN_SFF_RTR_SHIFT; - } - out_be16(®s->tx.idr1_0, can_id); - - if (!rtr) { - void __iomem *data = ®s->tx.dsr1_0; - u16 *payload = (u16 *)frame->data; - - /* It is safe to write into dsr[dlc+1] */ - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { - out_be16(data, *payload++); - data += 2 + _MSCAN_RESERVED_DSR_SIZE; - } - } - - out_8(®s->tx.dlr, frame->can_dlc); - out_8(®s->tx.tbpr, priv->cur_pri); - - /* Start transmission. */ - out_8(®s->cantflg, 1 << buf_id); - - if (!test_bit(F_TX_PROGRESS, &priv->flags)) - dev->trans_start = jiffies; - - list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head); - - can_put_echo_skb(skb, dev, buf_id); - - /* Enable interrupt. */ - priv->tx_active |= 1 << buf_id; - out_8(®s->cantier, priv->tx_active); - - return NETDEV_TX_OK; -} - -/* This function returns the old state to see where we came from */ -static enum can_state check_set_state(struct net_device *dev, u8 canrflg) -{ - struct mscan_priv *priv = netdev_priv(dev); - enum can_state state, old_state = priv->can.state; - - if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) { - state = state_map[max(MSCAN_STATE_RX(canrflg), - MSCAN_STATE_TX(canrflg))]; - priv->can.state = state; - } - return old_state; -} - -static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - u32 can_id; - int i; - - can_id = in_be16(®s->rx.idr1_0); - if (can_id & (1 << 3)) { - frame->can_id = CAN_EFF_FLAG; - can_id = ((can_id << 16) | in_be16(®s->rx.idr3_2)); - can_id = ((can_id & 0xffe00000) | - ((can_id & 0x7ffff) << 2)) >> 2; - } else { - can_id >>= 4; - frame->can_id = 0; - } - - frame->can_id |= can_id >> 1; - if (can_id & 1) - frame->can_id |= CAN_RTR_FLAG; - frame->can_dlc = in_8(®s->rx.dlr) & 0xf; - - if (!(frame->can_id & CAN_RTR_FLAG)) { - void __iomem *data = ®s->rx.dsr1_0; - u16 *payload = (u16 *)frame->data; - - for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { - *payload++ = in_be16(data); - data += 2 + _MSCAN_RESERVED_DSR_SIZE; - } - } - - out_8(®s->canrflg, MSCAN_RXF); -} - -static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, - u8 canrflg) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - struct net_device_stats *stats = &dev->stats; - enum can_state old_state; - - dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg); - frame->can_id = CAN_ERR_FLAG; - - if (canrflg & MSCAN_OVRIF) { - frame->can_id |= CAN_ERR_CRTL; - frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_over_errors++; - stats->rx_errors++; - } 
else { - frame->data[1] = 0; - } - - old_state = check_set_state(dev, canrflg); - /* State changed */ - if (old_state != priv->can.state) { - switch (priv->can.state) { - case CAN_STATE_ERROR_WARNING: - frame->can_id |= CAN_ERR_CRTL; - priv->can.can_stats.error_warning++; - if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) < - (canrflg & MSCAN_RSTAT_MSK)) - frame->data[1] |= CAN_ERR_CRTL_RX_WARNING; - if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) < - (canrflg & MSCAN_TSTAT_MSK)) - frame->data[1] |= CAN_ERR_CRTL_TX_WARNING; - break; - case CAN_STATE_ERROR_PASSIVE: - frame->can_id |= CAN_ERR_CRTL; - priv->can.can_stats.error_passive++; - frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; - break; - case CAN_STATE_BUS_OFF: - frame->can_id |= CAN_ERR_BUSOFF; - /* - * The MSCAN on the MPC5200 does recover from bus-off - * automatically. To avoid that we stop the chip doing - * a light-weight stop (we are in irq-context). - */ - out_8(®s->cantier, 0); - out_8(®s->canrier, 0); - setbits8(®s->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); - can_bus_off(dev); - break; - default: - break; - } - } - priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; - frame->can_dlc = CAN_ERR_DLC; - out_8(®s->canrflg, MSCAN_ERR_IF); -} - -static int mscan_rx_poll(struct napi_struct *napi, int quota) -{ - struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi); - struct net_device *dev = napi->dev; - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - struct net_device_stats *stats = &dev->stats; - int npackets = 0; - int ret = 1; - struct sk_buff *skb; - struct can_frame *frame; - u8 canrflg; - - while (npackets < quota) { - canrflg = in_8(®s->canrflg); - if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) - break; - - skb = alloc_can_skb(dev, &frame); - if (!skb) { - if (printk_ratelimit()) - dev_notice(dev->dev.parent, "packet dropped\n"); - stats->rx_dropped++; - out_8(®s->canrflg, canrflg); - continue; - } - - if (canrflg & MSCAN_RXF) - mscan_get_rx_frame(dev, frame); - else if (canrflg & MSCAN_ERR_IF) - mscan_get_err_frame(dev, frame, canrflg); - - stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; - npackets++; - netif_receive_skb(skb); - } - - if (!(in_8(®s->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { - napi_complete(&priv->napi); - clear_bit(F_RX_PROGRESS, &priv->flags); - if (priv->can.state < CAN_STATE_BUS_OFF) - out_8(®s->canrier, priv->shadow_canrier); - ret = 0; - } - return ret; -} - -static irqreturn_t mscan_isr(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *)dev_id; - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - struct net_device_stats *stats = &dev->stats; - u8 cantier, cantflg, canrflg; - irqreturn_t ret = IRQ_NONE; - - cantier = in_8(®s->cantier) & MSCAN_TXE; - cantflg = in_8(®s->cantflg) & cantier; - - if (cantier && cantflg) { - struct list_head *tmp, *pos; - - list_for_each_safe(pos, tmp, &priv->tx_head) { - struct tx_queue_entry *entry = - list_entry(pos, struct tx_queue_entry, list); - u8 mask = entry->mask; - - if (!(cantflg & mask)) - continue; - - out_8(®s->cantbsel, mask); - stats->tx_bytes += in_8(®s->tx.dlr); - stats->tx_packets++; - can_get_echo_skb(dev, entry->id); - priv->tx_active &= ~mask; - list_del(pos); - } - - if (list_empty(&priv->tx_head)) { - clear_bit(F_TX_WAIT_ALL, &priv->flags); - clear_bit(F_TX_PROGRESS, &priv->flags); - priv->cur_pri = 0; - } else { - dev->trans_start = jiffies; - } - - if (!test_bit(F_TX_WAIT_ALL, &priv->flags)) - netif_wake_queue(dev); - - out_8(®s->cantier, 
priv->tx_active); - ret = IRQ_HANDLED; - } - - canrflg = in_8(®s->canrflg); - if ((canrflg & ~MSCAN_STAT_MSK) && - !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) { - if (canrflg & ~MSCAN_STAT_MSK) { - priv->shadow_canrier = in_8(®s->canrier); - out_8(®s->canrier, 0); - napi_schedule(&priv->napi); - ret = IRQ_HANDLED; - } else { - clear_bit(F_RX_PROGRESS, &priv->flags); - } - } - return ret; -} - -static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode) -{ - struct mscan_priv *priv = netdev_priv(dev); - int ret = 0; - - if (!priv->open_time) - return -EINVAL; - - switch (mode) { - case CAN_MODE_START: - if (priv->can.state <= CAN_STATE_BUS_OFF) - mscan_set_mode(dev, MSCAN_INIT_MODE); - ret = mscan_start(dev); - if (ret) - break; - if (netif_queue_stopped(dev)) - netif_wake_queue(dev); - break; - - default: - ret = -EOPNOTSUPP; - break; - } - return ret; -} - -static int mscan_do_set_bittiming(struct net_device *dev) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - struct can_bittiming *bt = &priv->can.bittiming; - u8 btr0, btr1; - - btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw); - btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) | - BTR1_SET_TSEG2(bt->phase_seg2) | - BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)); - - dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n", - btr0, btr1); - - out_8(®s->canbtr0, btr0); - out_8(®s->canbtr1, btr1); - - return 0; -} - -static int mscan_open(struct net_device *dev) -{ - int ret; - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - - /* common open */ - ret = open_candev(dev); - if (ret) - return ret; - - napi_enable(&priv->napi); - - ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev); - if (ret < 0) { - dev_err(dev->dev.parent, "failed to attach interrupt\n"); - goto exit_napi_disable; - } - - priv->open_time = jiffies; - - clrbits8(®s->canctl1, MSCAN_LISTEN); - - ret = mscan_start(dev); - if (ret) - goto exit_free_irq; - - netif_start_queue(dev); - - return 0; - -exit_free_irq: - priv->open_time = 0; - free_irq(dev->irq, dev); -exit_napi_disable: - napi_disable(&priv->napi); - close_candev(dev); - return ret; -} - -static int mscan_close(struct net_device *dev) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - - netif_stop_queue(dev); - napi_disable(&priv->napi); - - out_8(®s->cantier, 0); - out_8(®s->canrier, 0); - mscan_set_mode(dev, MSCAN_INIT_MODE); - close_candev(dev); - free_irq(dev->irq, dev); - priv->open_time = 0; - - return 0; -} - -static const struct net_device_ops mscan_netdev_ops = { - .ndo_open = mscan_open, - .ndo_stop = mscan_close, - .ndo_start_xmit = mscan_start_xmit, -}; - -int register_mscandev(struct net_device *dev, int clock_src) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - u8 ctl1; - - ctl1 = in_8(®s->canctl1); - if (clock_src) - ctl1 |= MSCAN_CLKSRC; - else - ctl1 &= ~MSCAN_CLKSRC; - - ctl1 |= MSCAN_CANE; - out_8(®s->canctl1, ctl1); - udelay(100); - - /* acceptance mask/acceptance code (accept everything) */ - out_be16(®s->canidar1_0, 0); - out_be16(®s->canidar3_2, 0); - out_be16(®s->canidar5_4, 0); - out_be16(®s->canidar7_6, 0); - - out_be16(®s->canidmr1_0, 0xffff); - out_be16(®s->canidmr3_2, 0xffff); - out_be16(®s->canidmr5_4, 0xffff); - out_be16(®s->canidmr7_6, 0xffff); - /* Two 32 bit Acceptance Filters 
*/ - out_8(®s->canidac, MSCAN_AF_32BIT); - - mscan_set_mode(dev, MSCAN_INIT_MODE); - - return register_candev(dev); -} - -void unregister_mscandev(struct net_device *dev) -{ - struct mscan_priv *priv = netdev_priv(dev); - struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; - mscan_set_mode(dev, MSCAN_INIT_MODE); - clrbits8(®s->canctl1, MSCAN_CANE); - unregister_candev(dev); -} - -struct net_device *alloc_mscandev(void) -{ - struct net_device *dev; - struct mscan_priv *priv; - int i; - - dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX); - if (!dev) - return NULL; - priv = netdev_priv(dev); - - dev->netdev_ops = &mscan_netdev_ops; - - dev->flags |= IFF_ECHO; /* we support local echo */ - - netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8); - - priv->can.bittiming_const = &mscan_bittiming_const; - priv->can.do_set_bittiming = mscan_do_set_bittiming; - priv->can.do_set_mode = mscan_do_set_mode; - - for (i = 0; i < TX_QUEUE_SIZE; i++) { - priv->tx_queue[i].id = i; - priv->tx_queue[i].mask = 1 << i; - } - - return dev; -} - -MODULE_AUTHOR("Andrey Volkov "); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips"); diff --git a/trunk/drivers/net/can/mscan/mscan.h b/trunk/drivers/net/can/mscan/mscan.h deleted file mode 100644 index 00fc4aaf1ed8..000000000000 --- a/trunk/drivers/net/can/mscan/mscan.h +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Definitions of consts/structs to drive the Freescale MSCAN. - * - * Copyright (C) 2005-2006 Andrey Volkov , - * Varma Electronics Oy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the version 2 of the GNU General Public License - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __MSCAN_H__ -#define __MSCAN_H__ - -#include - -/* MSCAN control register 0 (CANCTL0) bits */ -#define MSCAN_RXFRM 0x80 -#define MSCAN_RXACT 0x40 -#define MSCAN_CSWAI 0x20 -#define MSCAN_SYNCH 0x10 -#define MSCAN_TIME 0x08 -#define MSCAN_WUPE 0x04 -#define MSCAN_SLPRQ 0x02 -#define MSCAN_INITRQ 0x01 - -/* MSCAN control register 1 (CANCTL1) bits */ -#define MSCAN_CANE 0x80 -#define MSCAN_CLKSRC 0x40 -#define MSCAN_LOOPB 0x20 -#define MSCAN_LISTEN 0x10 -#define MSCAN_WUPM 0x04 -#define MSCAN_SLPAK 0x02 -#define MSCAN_INITAK 0x01 - -/* Use the MPC5200 MSCAN variant? 
*/ -#ifdef CONFIG_PPC -#define MSCAN_FOR_MPC5200 -#endif - -#ifdef MSCAN_FOR_MPC5200 -#define MSCAN_CLKSRC_BUS 0 -#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC -#else -#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC -#define MSCAN_CLKSRC_XTAL 0 -#endif - -/* MSCAN receiver flag register (CANRFLG) bits */ -#define MSCAN_WUPIF 0x80 -#define MSCAN_CSCIF 0x40 -#define MSCAN_RSTAT1 0x20 -#define MSCAN_RSTAT0 0x10 -#define MSCAN_TSTAT1 0x08 -#define MSCAN_TSTAT0 0x04 -#define MSCAN_OVRIF 0x02 -#define MSCAN_RXF 0x01 -#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF) -#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0) -#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0) -#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK) - -#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \ - MSCAN_TSTAT1 | MSCAN_TSTAT0) -#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2) -#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4) -#define MSCAN_STATE_ACTIVE 0 -#define MSCAN_STATE_WARNING 1 -#define MSCAN_STATE_PASSIVE 2 -#define MSCAN_STATE_BUSOFF 3 - -/* MSCAN receiver interrupt enable register (CANRIER) bits */ -#define MSCAN_WUPIE 0x80 -#define MSCAN_CSCIE 0x40 -#define MSCAN_RSTATE1 0x20 -#define MSCAN_RSTATE0 0x10 -#define MSCAN_TSTATE1 0x08 -#define MSCAN_TSTATE0 0x04 -#define MSCAN_OVRIE 0x02 -#define MSCAN_RXFIE 0x01 - -/* MSCAN transmitter flag register (CANTFLG) bits */ -#define MSCAN_TXE2 0x04 -#define MSCAN_TXE1 0x02 -#define MSCAN_TXE0 0x01 -#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0) - -/* MSCAN transmitter interrupt enable register (CANTIER) bits */ -#define MSCAN_TXIE2 0x04 -#define MSCAN_TXIE1 0x02 -#define MSCAN_TXIE0 0x01 -#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0) - -/* MSCAN transmitter message abort request (CANTARQ) bits */ -#define MSCAN_ABTRQ2 0x04 -#define MSCAN_ABTRQ1 0x02 -#define MSCAN_ABTRQ0 0x01 - -/* MSCAN transmitter message abort ack (CANTAAK) bits */ -#define MSCAN_ABTAK2 0x04 -#define MSCAN_ABTAK1 0x02 -#define MSCAN_ABTAK0 0x01 - -/* MSCAN transmit buffer selection (CANTBSEL) bits */ -#define MSCAN_TX2 0x04 -#define MSCAN_TX1 0x02 -#define MSCAN_TX0 0x01 - -/* MSCAN ID acceptance control register (CANIDAC) bits */ -#define MSCAN_IDAM1 0x20 -#define MSCAN_IDAM0 0x10 -#define MSCAN_IDHIT2 0x04 -#define MSCAN_IDHIT1 0x02 -#define MSCAN_IDHIT0 0x01 - -#define MSCAN_AF_32BIT 0x00 -#define MSCAN_AF_16BIT MSCAN_IDAM0 -#define MSCAN_AF_8BIT MSCAN_IDAM1 -#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1) -#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1)) - -/* MSCAN Miscellaneous Register (CANMISC) bits */ -#define MSCAN_BOHOLD 0x01 - -/* MSCAN Identifier Register (IDR) bits */ -#define MSCAN_SFF_RTR_SHIFT 4 -#define MSCAN_EFF_RTR_SHIFT 0 -#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */ - -#ifdef MSCAN_FOR_MPC5200 -#define _MSCAN_RESERVED_(n, num) u8 _res##n[num] -#define _MSCAN_RESERVED_DSR_SIZE 2 -#else -#define _MSCAN_RESERVED_(n, num) -#define _MSCAN_RESERVED_DSR_SIZE 0 -#endif - -/* Structure of the hardware registers */ -struct mscan_regs { - /* (see doc S12MSCANV3/D) MPC5200 MSCAN */ - u8 canctl0; /* + 0x00 0x00 */ - u8 canctl1; /* + 0x01 0x01 */ - _MSCAN_RESERVED_(1, 2); /* + 0x02 */ - u8 canbtr0; /* + 0x04 0x02 */ - u8 canbtr1; /* + 0x05 0x03 */ - _MSCAN_RESERVED_(2, 2); /* + 0x06 */ - u8 canrflg; /* + 0x08 0x04 */ - u8 canrier; /* + 0x09 0x05 */ - _MSCAN_RESERVED_(3, 2); /* + 0x0a */ - u8 cantflg; /* + 0x0c 0x06 */ - u8 cantier; /* + 0x0d 0x07 */ - _MSCAN_RESERVED_(4, 2); /* + 0x0e */ - u8 cantarq; /* + 0x10 0x08 */ - u8 
cantaak; /* + 0x11 0x09 */ - _MSCAN_RESERVED_(5, 2); /* + 0x12 */ - u8 cantbsel; /* + 0x14 0x0a */ - u8 canidac; /* + 0x15 0x0b */ - u8 reserved; /* + 0x16 0x0c */ - _MSCAN_RESERVED_(6, 5); /* + 0x17 */ -#ifndef MSCAN_FOR_MPC5200 - u8 canmisc; /* 0x0d */ -#endif - u8 canrxerr; /* + 0x1c 0x0e */ - u8 cantxerr; /* + 0x1d 0x0f */ - _MSCAN_RESERVED_(7, 2); /* + 0x1e */ - u16 canidar1_0; /* + 0x20 0x10 */ - _MSCAN_RESERVED_(8, 2); /* + 0x22 */ - u16 canidar3_2; /* + 0x24 0x12 */ - _MSCAN_RESERVED_(9, 2); /* + 0x26 */ - u16 canidmr1_0; /* + 0x28 0x14 */ - _MSCAN_RESERVED_(10, 2); /* + 0x2a */ - u16 canidmr3_2; /* + 0x2c 0x16 */ - _MSCAN_RESERVED_(11, 2); /* + 0x2e */ - u16 canidar5_4; /* + 0x30 0x18 */ - _MSCAN_RESERVED_(12, 2); /* + 0x32 */ - u16 canidar7_6; /* + 0x34 0x1a */ - _MSCAN_RESERVED_(13, 2); /* + 0x36 */ - u16 canidmr5_4; /* + 0x38 0x1c */ - _MSCAN_RESERVED_(14, 2); /* + 0x3a */ - u16 canidmr7_6; /* + 0x3c 0x1e */ - _MSCAN_RESERVED_(15, 2); /* + 0x3e */ - struct { - u16 idr1_0; /* + 0x40 0x20 */ - _MSCAN_RESERVED_(16, 2); /* + 0x42 */ - u16 idr3_2; /* + 0x44 0x22 */ - _MSCAN_RESERVED_(17, 2); /* + 0x46 */ - u16 dsr1_0; /* + 0x48 0x24 */ - _MSCAN_RESERVED_(18, 2); /* + 0x4a */ - u16 dsr3_2; /* + 0x4c 0x26 */ - _MSCAN_RESERVED_(19, 2); /* + 0x4e */ - u16 dsr5_4; /* + 0x50 0x28 */ - _MSCAN_RESERVED_(20, 2); /* + 0x52 */ - u16 dsr7_6; /* + 0x54 0x2a */ - _MSCAN_RESERVED_(21, 2); /* + 0x56 */ - u8 dlr; /* + 0x58 0x2c */ - u8:8; /* + 0x59 0x2d */ - _MSCAN_RESERVED_(22, 2); /* + 0x5a */ - u16 time; /* + 0x5c 0x2e */ - } rx; - _MSCAN_RESERVED_(23, 2); /* + 0x5e */ - struct { - u16 idr1_0; /* + 0x60 0x30 */ - _MSCAN_RESERVED_(24, 2); /* + 0x62 */ - u16 idr3_2; /* + 0x64 0x32 */ - _MSCAN_RESERVED_(25, 2); /* + 0x66 */ - u16 dsr1_0; /* + 0x68 0x34 */ - _MSCAN_RESERVED_(26, 2); /* + 0x6a */ - u16 dsr3_2; /* + 0x6c 0x36 */ - _MSCAN_RESERVED_(27, 2); /* + 0x6e */ - u16 dsr5_4; /* + 0x70 0x38 */ - _MSCAN_RESERVED_(28, 2); /* + 0x72 */ - u16 dsr7_6; /* + 0x74 0x3a */ - _MSCAN_RESERVED_(29, 2); /* + 0x76 */ - u8 dlr; /* + 0x78 0x3c */ - u8 tbpr; /* + 0x79 0x3d */ - _MSCAN_RESERVED_(30, 2); /* + 0x7a */ - u16 time; /* + 0x7c 0x3e */ - } tx; - _MSCAN_RESERVED_(31, 2); /* + 0x7e */ -} __attribute__ ((packed)); - -#undef _MSCAN_RESERVED_ -#define MSCAN_REGION sizeof(struct mscan) - -#define MSCAN_NORMAL_MODE 0 -#define MSCAN_SLEEP_MODE MSCAN_SLPRQ -#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ) -#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ) -#define MSCAN_SET_MODE_RETRIES 255 -#define MSCAN_ECHO_SKB_MAX 3 - -#define BTR0_BRP_MASK 0x3f -#define BTR0_SJW_SHIFT 6 -#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT) - -#define BTR1_TSEG1_MASK 0xf -#define BTR1_TSEG2_SHIFT 4 -#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT) -#define BTR1_SAM_SHIFT 7 - -#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK) -#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \ - BTR0_SJW_MASK) - -#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK) -#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \ - BTR1_TSEG2_MASK) -#define BTR1_SET_SAM(sam) ((sam) ? 
1 << BTR1_SAM_SHIFT : 0) - -#define F_RX_PROGRESS 0 -#define F_TX_PROGRESS 1 -#define F_TX_WAIT_ALL 2 - -#define TX_QUEUE_SIZE 3 - -struct tx_queue_entry { - struct list_head list; - u8 mask; - u8 id; -}; - -struct mscan_priv { - struct can_priv can; /* must be the first member */ - long open_time; - unsigned long flags; - void __iomem *reg_base; /* ioremap'ed address to registers */ - u8 shadow_statflg; - u8 shadow_canrier; - u8 cur_pri; - u8 prev_buf_id; - u8 tx_active; - - struct list_head tx_head; - struct tx_queue_entry tx_queue[TX_QUEUE_SIZE]; - struct napi_struct napi; -}; - -extern struct net_device *alloc_mscandev(void); -/* - * clock_src: - * 1 = The MSCAN clock source is the onchip Bus Clock. - * 0 = The MSCAN clock source is the chip Oscillator Clock. - */ -extern int register_mscandev(struct net_device *dev, int clock_src); -extern void unregister_mscandev(struct net_device *dev); - -#endif /* __MSCAN_H__ */ diff --git a/trunk/drivers/net/can/sja1000/Kconfig b/trunk/drivers/net/can/sja1000/Kconfig deleted file mode 100644 index 4c674927f247..000000000000 --- a/trunk/drivers/net/can/sja1000/Kconfig +++ /dev/null @@ -1,47 +0,0 @@ -menuconfig CAN_SJA1000 - tristate "Philips/NXP SJA1000 devices" - depends on CAN_DEV && HAS_IOMEM - -if CAN_SJA1000 - -config CAN_SJA1000_ISA - tristate "ISA Bus based legacy SJA1000 driver" - depends on ISA - ---help--- - This driver adds legacy support for SJA1000 chips connected to - the ISA bus using I/O port, memory mapped or indirect access. - -config CAN_SJA1000_PLATFORM - tristate "Generic Platform Bus based SJA1000 driver" - ---help--- - This driver adds support for the SJA1000 chips connected to - the "platform bus" (Linux abstraction for directly to the - processor attached devices). Which can be found on various - boards from Phytec (http://www.phytec.de) like the PCM027, - PCM038. - -config CAN_SJA1000_OF_PLATFORM - tristate "Generic OF Platform Bus based SJA1000 driver" - depends on PPC_OF - ---help--- - This driver adds support for the SJA1000 chips connected to - the OpenFirmware "platform bus" found on embedded systems with - OpenFirmware bindings, e.g. if you have a PowerPC based system - you may want to enable this option. - -config CAN_EMS_PCI - tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card" - depends on PCI - ---help--- - This driver is for the one, two or four channel CPC-PCI, - CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche - (http://www.ems-wuensche.de). - -config CAN_KVASER_PCI - tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards" - depends on PCI - ---help--- - This driver is for the the PCIcanx and PCIcan cards (1, 2 or - 4 channel) from Kvaser (http://www.kvaser.com). 
- -endif diff --git a/trunk/drivers/net/can/sja1000/sja1000.c b/trunk/drivers/net/can/sja1000/sja1000.c index b4ba88a31075..782a47fabf2c 100644 --- a/trunk/drivers/net/can/sja1000/sja1000.c +++ b/trunk/drivers/net/can/sja1000/sja1000.c @@ -516,7 +516,7 @@ static int sja1000_open(struct net_device *dev) /* register interrupt handler, if not done by the device driver */ if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { - err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags, + err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags, dev->name, (void *)dev); if (err) { close_candev(dev); diff --git a/trunk/drivers/net/can/usb/Kconfig b/trunk/drivers/net/can/usb/Kconfig deleted file mode 100644 index bbc78e0b8a15..000000000000 --- a/trunk/drivers/net/can/usb/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -menu "CAN USB interfaces" - depends on USB && CAN_DEV - -config CAN_EMS_USB - tristate "EMS CPC-USB/ARM7 CAN/USB interface" - ---help--- - This driver is for the one channel CPC-USB/ARM7 CAN/USB interface - from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). - -endmenu diff --git a/trunk/drivers/net/can/usb/Makefile b/trunk/drivers/net/can/usb/Makefile index 0afd51d4c7a5..c3f75ba701b1 100644 --- a/trunk/drivers/net/can/usb/Makefile +++ b/trunk/drivers/net/can/usb/Makefile @@ -3,5 +3,3 @@ # obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/trunk/drivers/net/davinci_emac.c b/trunk/drivers/net/davinci_emac.c index 8edac8915ea8..79ce8e857eab 100644 --- a/trunk/drivers/net/davinci_emac.c +++ b/trunk/drivers/net/davinci_emac.c @@ -2136,6 +2136,9 @@ static int emac_poll(struct napi_struct *napi, int budget) u32 status = 0; u32 num_pkts = 0; + if (!netif_running(ndev)) + return 0; + /* Check interrupt vectors and call packet processing */ status = emac_read(EMAC_MACINVECTOR); diff --git a/trunk/drivers/net/dm9000.c b/trunk/drivers/net/dm9000.c index 3aab2e466008..31b8bef49d2e 100644 --- a/trunk/drivers/net/dm9000.c +++ b/trunk/drivers/net/dm9000.c @@ -100,7 +100,6 @@ typedef struct board_info { unsigned int flags; unsigned int in_suspend :1; - unsigned int wake_supported :1; int debug_level; enum dm9000_type type; @@ -117,8 +116,6 @@ typedef struct board_info { struct resource *data_req; struct resource *irq_res; - int irq_wake; - struct mutex addr_lock; /* phy and eeprom access lock */ struct delayed_work phy_poll; @@ -128,7 +125,6 @@ typedef struct board_info { struct mii_if_info mii; u32 msg_enable; - u32 wake_state; int rx_csum; int can_csum; @@ -572,54 +568,6 @@ static int dm9000_set_eeprom(struct net_device *dev, return 0; } -static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) -{ - board_info_t *dm = to_dm9000_board(dev); - - memset(w, 0, sizeof(struct ethtool_wolinfo)); - - /* note, we could probably support wake-phy too */ - w->supported = dm->wake_supported ? 
WAKE_MAGIC : 0; - w->wolopts = dm->wake_state; -} - -static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) -{ - board_info_t *dm = to_dm9000_board(dev); - unsigned long flags; - u32 opts = w->wolopts; - u32 wcr = 0; - - if (!dm->wake_supported) - return -EOPNOTSUPP; - - if (opts & ~WAKE_MAGIC) - return -EINVAL; - - if (opts & WAKE_MAGIC) - wcr |= WCR_MAGICEN; - - mutex_lock(&dm->addr_lock); - - spin_lock_irqsave(&dm->lock, flags); - iow(dm, DM9000_WCR, wcr); - spin_unlock_irqrestore(&dm->lock, flags); - - mutex_unlock(&dm->addr_lock); - - if (dm->wake_state != opts) { - /* change in wol state, update IRQ state */ - - if (!dm->wake_state) - set_irq_wake(dm->irq_wake, 1); - else if (dm->wake_state & !opts) - set_irq_wake(dm->irq_wake, 0); - } - - dm->wake_state = opts; - return 0; -} - static const struct ethtool_ops dm9000_ethtool_ops = { .get_drvinfo = dm9000_get_drvinfo, .get_settings = dm9000_get_settings, @@ -628,8 +576,6 @@ static const struct ethtool_ops dm9000_ethtool_ops = { .set_msglevel = dm9000_set_msglevel, .nway_reset = dm9000_nway_reset, .get_link = dm9000_get_link, - .get_wol = dm9000_get_wol, - .set_wol = dm9000_set_wol, .get_eeprom_len = dm9000_get_eeprom_len, .get_eeprom = dm9000_get_eeprom, .set_eeprom = dm9000_set_eeprom, @@ -776,7 +722,6 @@ dm9000_init_dm9000(struct net_device *dev) { board_info_t *db = netdev_priv(dev); unsigned int imr; - unsigned int ncr; dm9000_dbg(db, 1, "entering %s\n", __func__); @@ -791,15 +736,8 @@ dm9000_init_dm9000(struct net_device *dev) iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ iow(db, DM9000_GPR, 0); /* Enable PHY */ - ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; - - /* if wol is needed, then always set NCR_WAKEEN otherwise we end - * up dumping the wake events if we disable this. There is already - * a wake-mask in DM9000_WCR */ - if (db->wake_supported) - ncr |= NCR_WAKEEN; - - iow(db, DM9000_NCR, ncr); + if (db->flags & DM9000_PLATF_EXT_PHY) + iow(db, DM9000_NCR, NCR_EXT_PHY); /* Program operating register */ iow(db, DM9000_TCR, 0); /* TX Polling clear */ @@ -1107,41 +1045,6 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - board_info_t *db = netdev_priv(dev); - unsigned long flags; - unsigned nsr, wcr; - - spin_lock_irqsave(&db->lock, flags); - - nsr = ior(db, DM9000_NSR); - wcr = ior(db, DM9000_WCR); - - dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr); - - if (nsr & NSR_WAKEST) { - /* clear, so we can avoid */ - iow(db, DM9000_NSR, NSR_WAKEST); - - if (wcr & WCR_LINKST) - dev_info(db->dev, "wake by link status change\n"); - if (wcr & WCR_SAMPLEST) - dev_info(db->dev, "wake by sample packet\n"); - if (wcr & WCR_MAGICST ) - dev_info(db->dev, "wake by magic packet\n"); - if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST))) - dev_err(db->dev, "wake signalled with no reason? " - "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr); - - } - - spin_unlock_irqrestore(&db->lock, flags); - - return (nsr & NSR_WAKEST) ? 
IRQ_HANDLED : IRQ_NONE; -} - #ifdef CONFIG_NET_POLL_CONTROLLER /* *Used by netconsole @@ -1396,29 +1299,6 @@ dm9000_probe(struct platform_device *pdev) goto out; } - db->irq_wake = platform_get_irq(pdev, 1); - if (db->irq_wake >= 0) { - dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); - - ret = request_irq(db->irq_wake, dm9000_wol_interrupt, - IRQF_SHARED, dev_name(db->dev), ndev); - if (ret) { - dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret); - } else { - - /* test to see if irq is really wakeup capable */ - ret = set_irq_wake(db->irq_wake, 1); - if (ret) { - dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", - db->irq_wake, ret); - ret = 0; - } else { - set_irq_wake(db->irq_wake, 0); - db->wake_supported = 1; - } - } - } - iosize = resource_size(db->addr_res); db->addr_req = request_mem_region(db->addr_res->start, iosize, pdev->name); @@ -1610,14 +1490,10 @@ dm9000_drv_suspend(struct device *dev) db = netdev_priv(ndev); db->in_suspend = 1; - if (!netif_running(ndev)) - return 0; - - netif_device_detach(ndev); - - /* only shutdown if not using WoL */ - if (!db->wake_state) + if (netif_running(ndev)) { + netif_device_detach(ndev); dm9000_shutdown(ndev); + } } return 0; } @@ -1630,13 +1506,10 @@ dm9000_drv_resume(struct device *dev) board_info_t *db = netdev_priv(ndev); if (ndev) { + if (netif_running(ndev)) { - /* reset if we were not in wake mode to ensure if - * the device was powered off it is in a known state */ - if (!db->wake_state) { - dm9000_reset(db); - dm9000_init_dm9000(ndev); - } + dm9000_reset(db); + dm9000_init_dm9000(ndev); netif_device_attach(ndev); } diff --git a/trunk/drivers/net/dm9000.h b/trunk/drivers/net/dm9000.h index 55688bd1a3ef..fb1c924d79b4 100644 --- a/trunk/drivers/net/dm9000.h +++ b/trunk/drivers/net/dm9000.h @@ -111,13 +111,6 @@ #define RSR_CE (1<<1) #define RSR_FOE (1<<0) -#define WCR_LINKEN (1 << 5) -#define WCR_SAMPLEEN (1 << 4) -#define WCR_MAGICEN (1 << 3) -#define WCR_LINKST (1 << 2) -#define WCR_SAMPLEST (1 << 1) -#define WCR_MAGICST (1 << 0) - #define FCTR_HWOT(ot) (( ot & 0xf ) << 4 ) #define FCTR_LWOT(ot) ( ot & 0xf ) diff --git a/trunk/drivers/net/ethoc.c b/trunk/drivers/net/ethoc.c index 96b6dc42fc74..f1c565282d58 100644 --- a/trunk/drivers/net/ethoc.c +++ b/trunk/drivers/net/ethoc.c @@ -640,7 +640,7 @@ static int ethoc_mdio_probe(struct net_device *dev) return -ENXIO; } - phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0, + phy = phy_connect(dev, dev_name(&phy->dev), ðoc_mdio_poll, 0, PHY_INTERFACE_MODE_GMII); if (IS_ERR(phy)) { dev_err(&dev->dev, "could not attach to PHY\n"); diff --git a/trunk/drivers/net/forcedeth.c b/trunk/drivers/net/forcedeth.c index 73fe97777201..0a1c2bb27d4d 100644 --- a/trunk/drivers/net/forcedeth.c +++ b/trunk/drivers/net/forcedeth.c @@ -5820,7 +5820,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i dev->dev_addr); dev_printk(KERN_ERR, &pci_dev->dev, "Please complain to your hardware vendor. 
Switching to a random MAC.\n"); - random_ether_addr(dev->dev_addr); + dev->dev_addr[0] = 0x00; + dev->dev_addr[1] = 0x00; + dev->dev_addr[2] = 0x6c; + get_random_bytes(&dev->dev_addr[3], 3); } dprintk(KERN_DEBUG "%s: MAC Address %pM\n", diff --git a/trunk/drivers/net/gianfar.c b/trunk/drivers/net/gianfar.c index 16def131c390..197b358e6361 100644 --- a/trunk/drivers/net/gianfar.c +++ b/trunk/drivers/net/gianfar.c @@ -1246,7 +1246,7 @@ static int gfar_restore(struct device *dev) phy_start(priv->phydev); netif_device_attach(ndev); - enable_napi(priv); + napi_enable(&priv->gfargrp.napi); return 0; } @@ -1928,11 +1928,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* total number of fragments in the SKB */ nr_frags = skb_shinfo(skb)->nr_frags; + spin_lock_irqsave(&tx_queue->txlock, flags); + /* check if there is space to queue this packet */ if ((nr_frags+1) > tx_queue->num_txbdfree) { /* no space, stop the queue */ netif_tx_stop_queue(txq); dev->stats.tx_fifo_errors++; + spin_unlock_irqrestore(&tx_queue->txlock, flags); return NETDEV_TX_BUSY; } @@ -1995,20 +1998,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); - /* - * We can work in parallel with gfar_clean_tx_ring(), except - * when modifying num_txbdfree. Note that we didn't grab the lock - * when we were reading the num_txbdfree and checking for available - * space, that's because outside of this function it can only grow, - * and once we've got needed space, it cannot suddenly disappear. - * - * The lock also protects us from gfar_error(), which can modify - * regs->tstat and thus retrigger the transfers, which is why we - * also must grab the lock before setting ready bit for the first - * to be transmitted BD. 
- */
- spin_lock_irqsave(&tx_queue->txlock, flags);
-
/*
* The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
@@ -2236,8 +2225,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
- unsigned long flags;
-
frags = skb_shinfo(skb)->nr_frags;
lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
@@ -2282,9 +2269,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
TX_RING_MOD_MASK(tx_ring_size);
howmany++;
- spin_lock_irqsave(&tx_queue->txlock, flags);
tx_queue->num_txbdfree += frags + 1;
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
/* If we freed a buffer, we can restart transmission, if necessary */
@@ -2519,6 +2504,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb_put(skb, pkt_len);
dev->stats.rx_bytes += pkt_len;
+ if (in_irq() || irqs_disabled())
+ printk("Interrupt problem!\n");
gfar_process_frame(dev, skb, amount_pull);
} else {
@@ -2563,6 +2550,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
int tx_cleaned = 0, i, left_over_budget = budget;
unsigned long serviced_queues = 0;
int num_queues = 0;
+ unsigned long flags;
num_queues = gfargrp->num_rx_queues;
budget_per_queue = budget/num_queues;
@@ -2582,7 +2570,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
rx_queue = priv->rx_queue[i];
tx_queue = priv->tx_queue[rx_queue->qindex];
- tx_cleaned += gfar_clean_tx_ring(tx_queue);
+ /* If we fail to get the lock,
+ * don't bother with the TX BDs */
+ if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
+ tx_cleaned += gfar_clean_tx_ring(tx_queue);
+ spin_unlock_irqrestore(&tx_queue->txlock,
+ flags);
+ }
+
rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
budget_per_queue);
rx_cleaned += rx_cleaned_per_queue;
@@ -2950,22 +2945,14 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
if (events & IEVENT_CRL)
dev->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
- unsigned long flags;
-
if (netif_msg_tx_err(priv))
printk(KERN_DEBUG "%s: TX FIFO underrun, "
"packet dropped.\n", dev->name);
dev->stats.tx_dropped++;
priv->extra_stats.tx_underrun++;
- local_irq_save(flags);
- lock_tx_qs(priv);
-
/* Reactivate the Tx Queues */
gfar_write(&regs->tstat, gfargrp->tstat);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
}
if (netif_msg_tx_err(priv))
printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
diff --git a/trunk/drivers/net/gianfar_sysfs.c b/trunk/drivers/net/gianfar_sysfs.c
index b31c9c8876e6..3724835d2856 100644
--- a/trunk/drivers/net/gianfar_sysfs.c
+++ b/trunk/drivers/net/gianfar_sysfs.c
@@ -186,7 +186,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
temp = gfar_read(&regs->attreli);
temp &= ~ATTRELI_EI_MASK;
temp |= ATTRELI_EI(index);
- gfar_write(&regs->attreli, temp);
+ gfar_write(&regs->attreli, flags);
out:
unlock_rx_qs(priv);
diff --git a/trunk/drivers/net/hamradio/mkiss.c b/trunk/drivers/net/hamradio/mkiss.c
index 7db0a1c3216c..fc9c57893f8a 100644
--- a/trunk/drivers/net/hamradio/mkiss.c
+++ b/trunk/drivers/net/hamradio/mkiss.c
@@ -903,7 +903,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
- switch (cmd) {
+ switch (arg) {
case SIOCGIFNAME:
case SIOCGIFENCAP:
case SIOCSIFENCAP:
diff --git a/trunk/drivers/net/igb/igb.h b/trunk/drivers/net/igb/igb.h
index
63abd1c0d75e..3298f5a11dab 100644 --- a/trunk/drivers/net/igb/igb.h +++ b/trunk/drivers/net/igb/igb.h @@ -59,10 +59,10 @@ struct igb_adapter; #define MAX_Q_VECTORS 8 /* Transmit and receive queues */ -#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \ - (hw->mac.type > e1000_82575 ? 8 : 4)) -#define IGB_ABS_MAX_TX_QUEUES 8 -#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES +#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ + (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) +#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES +#define IGB_ABS_MAX_TX_QUEUES 4 #define IGB_MAX_VF_MC_ENTRIES 30 #define IGB_MAX_VF_FUNCTIONS 8 @@ -249,6 +249,10 @@ struct igb_adapter { u16 link_speed; u16 link_duplex; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; /* Interrupt Throttle Rate */ u32 rx_itr_setting; u32 tx_itr_setting; @@ -311,7 +315,6 @@ struct igb_adapter { u16 rx_ring_count; unsigned int vfs_allocated_count; struct vf_data_storage *vf_data; - u32 rss_queues; }; #define IGB_FLAG_HAS_MSI (1 << 0) diff --git a/trunk/drivers/net/igb/igb_ethtool.c b/trunk/drivers/net/igb/igb_ethtool.c index c1cde5b44906..90b89a81f669 100644 --- a/trunk/drivers/net/igb/igb_ethtool.c +++ b/trunk/drivers/net/igb/igb_ethtool.c @@ -37,88 +37,77 @@ #include "igb.h" +enum {NETDEV_STATS, IGB_STATS}; + struct igb_stats { char stat_string[ETH_GSTRING_LEN]; + int type; int sizeof_stat; int stat_offset; }; -#define IGB_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ - .stat_offset = offsetof(struct igb_adapter, _stat) \ -} -static const struct igb_stats igb_gstrings_stats[] = { - IGB_STAT("rx_packets", stats.gprc), - IGB_STAT("tx_packets", stats.gptc), - IGB_STAT("rx_bytes", stats.gorc), - IGB_STAT("tx_bytes", stats.gotc), - IGB_STAT("rx_broadcast", stats.bprc), - IGB_STAT("tx_broadcast", stats.bptc), - IGB_STAT("rx_multicast", stats.mprc), - IGB_STAT("tx_multicast", stats.mptc), - IGB_STAT("multicast", stats.mprc), - IGB_STAT("collisions", stats.colc), - IGB_STAT("rx_crc_errors", stats.crcerrs), - IGB_STAT("rx_no_buffer_count", stats.rnbc), - IGB_STAT("rx_missed_errors", stats.mpc), - IGB_STAT("tx_aborted_errors", stats.ecol), - IGB_STAT("tx_carrier_errors", stats.tncrs), - IGB_STAT("tx_window_errors", stats.latecol), - IGB_STAT("tx_abort_late_coll", stats.latecol), - IGB_STAT("tx_deferred_ok", stats.dc), - IGB_STAT("tx_single_coll_ok", stats.scc), - IGB_STAT("tx_multi_coll_ok", stats.mcc), - IGB_STAT("tx_timeout_count", tx_timeout_count), - IGB_STAT("rx_long_length_errors", stats.roc), - IGB_STAT("rx_short_length_errors", stats.ruc), - IGB_STAT("rx_align_errors", stats.algnerrc), - IGB_STAT("tx_tcp_seg_good", stats.tsctc), - IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), - IGB_STAT("rx_flow_control_xon", stats.xonrxc), - IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), - IGB_STAT("tx_flow_control_xon", stats.xontxc), - IGB_STAT("tx_flow_control_xoff", stats.xofftxc), - IGB_STAT("rx_long_byte_count", stats.gorc), - IGB_STAT("tx_dma_out_of_sync", stats.doosync), - IGB_STAT("tx_smbus", stats.mgptc), - IGB_STAT("rx_smbus", stats.mgprc), - IGB_STAT("dropped_smbus", stats.mgpdc), -}; +#define IGB_STAT(m) IGB_STATS, \ + FIELD_SIZEOF(struct igb_adapter, m), \ + offsetof(struct igb_adapter, m) +#define IGB_NETDEV_STAT(m) NETDEV_STATS, \ + FIELD_SIZEOF(struct net_device, m), \ + offsetof(struct net_device, m) -#define IGB_NETDEV_STAT(_net_stat) { \ - .stat_string = __stringify(_net_stat), \ - 
.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ - .stat_offset = offsetof(struct net_device_stats, _net_stat) \ -} -static const struct igb_stats igb_gstrings_net_stats[] = { - IGB_NETDEV_STAT(rx_errors), - IGB_NETDEV_STAT(tx_errors), - IGB_NETDEV_STAT(tx_dropped), - IGB_NETDEV_STAT(rx_length_errors), - IGB_NETDEV_STAT(rx_over_errors), - IGB_NETDEV_STAT(rx_frame_errors), - IGB_NETDEV_STAT(rx_fifo_errors), - IGB_NETDEV_STAT(tx_fifo_errors), - IGB_NETDEV_STAT(tx_heartbeat_errors) +static const struct igb_stats igb_gstrings_stats[] = { + { "rx_packets", IGB_STAT(stats.gprc) }, + { "tx_packets", IGB_STAT(stats.gptc) }, + { "rx_bytes", IGB_STAT(stats.gorc) }, + { "tx_bytes", IGB_STAT(stats.gotc) }, + { "rx_broadcast", IGB_STAT(stats.bprc) }, + { "tx_broadcast", IGB_STAT(stats.bptc) }, + { "rx_multicast", IGB_STAT(stats.mprc) }, + { "tx_multicast", IGB_STAT(stats.mptc) }, + { "rx_errors", IGB_NETDEV_STAT(stats.rx_errors) }, + { "tx_errors", IGB_NETDEV_STAT(stats.tx_errors) }, + { "tx_dropped", IGB_NETDEV_STAT(stats.tx_dropped) }, + { "multicast", IGB_STAT(stats.mprc) }, + { "collisions", IGB_STAT(stats.colc) }, + { "rx_length_errors", IGB_NETDEV_STAT(stats.rx_length_errors) }, + { "rx_over_errors", IGB_NETDEV_STAT(stats.rx_over_errors) }, + { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, + { "rx_frame_errors", IGB_NETDEV_STAT(stats.rx_frame_errors) }, + { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, + { "rx_queue_drop_packet_count", IGB_NETDEV_STAT(stats.rx_fifo_errors) }, + { "rx_missed_errors", IGB_STAT(stats.mpc) }, + { "tx_aborted_errors", IGB_STAT(stats.ecol) }, + { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, + { "tx_fifo_errors", IGB_NETDEV_STAT(stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", IGB_NETDEV_STAT(stats.tx_heartbeat_errors) }, + { "tx_window_errors", IGB_STAT(stats.latecol) }, + { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, + { "tx_deferred_ok", IGB_STAT(stats.dc) }, + { "tx_single_coll_ok", IGB_STAT(stats.scc) }, + { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, + { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, + { "rx_long_length_errors", IGB_STAT(stats.roc) }, + { "rx_short_length_errors", IGB_STAT(stats.ruc) }, + { "rx_align_errors", IGB_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, + { "rx_long_byte_count", IGB_STAT(stats.gorc) }, + { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, + { "tx_smbus", IGB_STAT(stats.mgptc) }, + { "rx_smbus", IGB_STAT(stats.mgprc) }, + { "dropped_smbus", IGB_STAT(stats.mgpdc) }, }; -#define IGB_GLOBAL_STATS_LEN \ - (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) -#define IGB_NETDEV_STATS_LEN \ - (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) -#define IGB_RX_QUEUE_STATS_LEN \ - (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) -#define IGB_TX_QUEUE_STATS_LEN \ - (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) #define IGB_QUEUE_STATS_LEN \ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ - IGB_RX_QUEUE_STATS_LEN) + \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \ (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ - IGB_TX_QUEUE_STATS_LEN)) -#define IGB_STATS_LEN \ - (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) - + (sizeof(struct 
igb_tx_queue_stats) / sizeof(u64)))) +#define IGB_GLOBAL_STATS_LEN \ + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", @@ -746,17 +735,17 @@ static int igb_set_ringparam(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); struct igb_ring *temp_ring; int i, err = 0; - u16 new_rx_count, new_tx_count; + u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); - new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD); + new_rx_count = max(new_rx_count, (u32)IGB_MIN_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); - new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD); + new_tx_count = max(new_tx_count, (u32)IGB_MIN_TXD); new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring_count) && @@ -1933,32 +1922,43 @@ static void igb_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct igb_adapter *adapter = netdev_priv(netdev); - struct net_device_stats *net_stats = &netdev->stats; u64 *queue_stat; - int i, j, k; - char *p; + int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); + int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); + int j; + int i; + char *p = NULL; igb_update_stats(adapter); for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { - p = (char *)adapter + igb_gstrings_stats[i].stat_offset; + switch (igb_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) netdev + + igb_gstrings_stats[i].stat_offset; + break; + case IGB_STATS: + p = (char *) adapter + + igb_gstrings_stats[i].stat_offset; + break; + } + data[i] = (igb_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { - p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; - data[i] = (igb_gstrings_net_stats[j].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } for (j = 0; j < adapter->num_tx_queues; j++) { + int k; queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; - for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) - data[i] = queue_stat[k]; + for (k = 0; k < stat_count_tx; k++) + data[i + k] = queue_stat[k]; + i += k; } for (j = 0; j < adapter->num_rx_queues; j++) { + int k; queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; - for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) - data[i] = queue_stat[k]; + for (k = 0; k < stat_count_rx; k++) + data[i + k] = queue_stat[k]; + i += k; } } @@ -1979,11 +1979,6 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { - memcpy(p, igb_gstrings_net_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } for (i = 0; i < adapter->num_tx_queues; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; diff --git a/trunk/drivers/net/igb/igb_main.c b/trunk/drivers/net/igb/igb_main.c index 0cab5e2b0894..b044c985df0b 100644 --- a/trunk/drivers/net/igb/igb_main.c +++ b/trunk/drivers/net/igb/igb_main.c @@ -296,10 +296,10 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) * and continue consuming queues in the same sequence */ if (adapter->vfs_allocated_count) { - for (; i < adapter->rss_queues; i++) + for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i].reg_idx = rbase_offset + Q_IDX_82576(i); - for (; j < adapter->rss_queues; j++) + for (; j < adapter->num_tx_queues; j++) adapter->tx_ring[j].reg_idx = rbase_offset + Q_IDX_82576(j); } @@ -618,15 +618,14 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) int numvecs, i; /* Number of supported queues. */ - adapter->num_rx_queues = adapter->rss_queues; - adapter->num_tx_queues = adapter->rss_queues; + adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); + adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); /* start with one vector for every rx queue */ numvecs = adapter->num_rx_queues; /* if tx handler is seperate add 1 for every tx queue */ - if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) - numvecs += adapter->num_tx_queues; + numvecs += adapter->num_tx_queues; /* store the number of vectors reserved for queues */ adapter->num_q_vectors = numvecs; @@ -667,7 +666,6 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) } #endif adapter->vfs_allocated_count = 0; - adapter->rss_queues = 1; adapter->flags |= IGB_FLAG_QUEUE_PAIRS; adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; @@ -1568,6 +1566,56 @@ static int __devinit igb_probe(struct pci_dev *pdev, } #endif + switch (hw->mac.type) { + case e1000_82576: + /* + * Initialize hardware timer: we keep it running just in case + * that some program needs it later on. + */ + memset(&adapter->cycles, 0, sizeof(adapter->cycles)); + adapter->cycles.read = igb_read_clock; + adapter->cycles.mask = CLOCKSOURCE_MASK(64); + adapter->cycles.mult = 1; + /** + * Scale the NIC clock cycle by a large factor so that + * relatively small clock corrections can be added or + * substracted at each clock tick. The drawbacks of a large + * factor are a) that the clock register overflows more quickly + * (not such a big deal) and b) that the increment per tick has + * to fit into 24 bits. As a result we need to use a shift of + * 19 so we can fit a value of 16 into the TIMINCA register. 
+ */ + adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; + wr32(E1000_TIMINCA, + (1 << E1000_TIMINCA_16NS_SHIFT) | + (16 << IGB_82576_TSYNC_SHIFT)); + + /* Set registers so that rollover occurs soon to test this. */ + wr32(E1000_SYSTIML, 0x00000000); + wr32(E1000_SYSTIMH, 0xFF800000); + wrfl(); + + timecounter_init(&adapter->clock, + &adapter->cycles, + ktime_to_ns(ktime_get_real())); + /* + * Synchronize our NIC clock against system wall clock. NIC + * time stamp reading requires ~3us per sample, each sample + * was pretty stable even under load => only require 10 + * samples for each offset comparison. + */ + memset(&adapter->compare, 0, sizeof(adapter->compare)); + adapter->compare.source = &adapter->clock; + adapter->compare.target = ktime_get_real; + adapter->compare.num_samples = 10; + timecompare_update(&adapter->compare, 0); + break; + case e1000_82575: + /* 82575 does not support timesync */ + default: + break; + } + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", @@ -1733,70 +1781,6 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) #endif /* CONFIG_PCI_IOV */ } - -/** - * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp - * @adapter: board private structure to initialize - * - * igb_init_hw_timer initializes the function pointer and values for the hw - * timer found in hardware. - **/ -static void igb_init_hw_timer(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - switch (hw->mac.type) { - case e1000_82576: - /* - * Initialize hardware timer: we keep it running just in case - * that some program needs it later on. - */ - memset(&adapter->cycles, 0, sizeof(adapter->cycles)); - adapter->cycles.read = igb_read_clock; - adapter->cycles.mask = CLOCKSOURCE_MASK(64); - adapter->cycles.mult = 1; - /** - * Scale the NIC clock cycle by a large factor so that - * relatively small clock corrections can be added or - * substracted at each clock tick. The drawbacks of a large - * factor are a) that the clock register overflows more quickly - * (not such a big deal) and b) that the increment per tick has - * to fit into 24 bits. As a result we need to use a shift of - * 19 so we can fit a value of 16 into the TIMINCA register. - */ - adapter->cycles.shift = IGB_82576_TSYNC_SHIFT; - wr32(E1000_TIMINCA, - (1 << E1000_TIMINCA_16NS_SHIFT) | - (16 << IGB_82576_TSYNC_SHIFT)); - - /* Set registers so that rollover occurs soon to test this. */ - wr32(E1000_SYSTIML, 0x00000000); - wr32(E1000_SYSTIMH, 0xFF800000); - wrfl(); - - timecounter_init(&adapter->clock, - &adapter->cycles, - ktime_to_ns(ktime_get_real())); - /* - * Synchronize our NIC clock against system wall clock. NIC - * time stamp reading requires ~3us per sample, each sample - * was pretty stable even under load => only require 10 - * samples for each offset comparison. 
- */ - memset(&adapter->compare, 0, sizeof(adapter->compare)); - adapter->compare.source = &adapter->clock; - adapter->compare.target = ktime_get_real; - adapter->compare.num_samples = 10; - timecompare_update(&adapter->compare, 0); - break; - case e1000_82575: - /* 82575 does not support timesync */ - default: - break; - } - -} - /** * igb_sw_init - Initialize general software structures (struct igb_adapter) * @adapter: board private structure to initialize @@ -1826,24 +1810,12 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) adapter->vfs_allocated_count = max_vfs; #endif /* CONFIG_PCI_IOV */ - adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); - - /* - * if rss_queues > 4 or vfs are going to be allocated with rss_queues - * then we should combine the queues into a queue pair in order to - * conserve interrupts due to limited supply - */ - if ((adapter->rss_queues > 4) || - ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } - igb_init_hw_timer(adapter); igb_probe_vfs(adapter); /* Explicitly disable IRQ since the NIC can be in any state. */ @@ -2028,7 +2000,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter) } } - for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) { + for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { int r_idx = i % adapter->num_tx_queues; adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; } @@ -2212,7 +2184,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) array_wr32(E1000_RSSRK(0), j, rsskey); } - num_rx_queues = adapter->rss_queues; + num_rx_queues = adapter->num_rx_queues; if (adapter->vfs_allocated_count) { /* 82575 and 82576 supports 2 RSS queues for VMDq */ @@ -2268,7 +2240,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) E1000_VT_CTL_DEFAULT_POOL_SHIFT; wr32(E1000_VT_CTL, vtctl); } - if (adapter->rss_queues > 1) + if (adapter->num_rx_queues > 1) mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q; else mrqc = E1000_MRQC_ENABLE_VMDQ; @@ -2398,7 +2370,7 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn) /* clear all bits that might not be set */ vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); - if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count) vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ /* * for VMDq only allow the VFs and pool 0 to accept broadcast and @@ -2943,6 +2915,7 @@ static void igb_watchdog_task(struct work_struct *work) watchdog_task); struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + struct igb_ring *tx_ring = adapter->tx_ring; u32 link; int i; @@ -3012,25 +2985,23 @@ static void igb_watchdog_task(struct work_struct *work) igb_update_stats(adapter); igb_update_adaptive(hw); - for (i = 0; i < adapter->num_tx_queues; i++) { - struct igb_ring *tx_ring = &adapter->tx_ring[i]; - if (!netif_carrier_ok(netdev)) { + if (!netif_carrier_ok(netdev)) { + if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
*/ - if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { - adapter->tx_timeout_count++; - schedule_work(&adapter->reset_task); - /* return immediately since reset is imminent */ - return; - } + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; } - - /* Force detection of hung controller every watchdog period */ - tx_ring->detect_tx_hung = true; } + /* Force detection of hung controller every watchdog period */ + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i].detect_tx_hung = true; + /* Cause software interrupt to ensure rx ring is cleaned */ if (adapter->msix_entries) { u32 eics = 0; @@ -3790,7 +3761,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) void igb_update_stats(struct igb_adapter *adapter) { - struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); + struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; u32 rnbc; @@ -3814,13 +3785,13 @@ void igb_update_stats(struct igb_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp; - net_stats->rx_fifo_errors += rqdpc_tmp; + netdev->stats.rx_fifo_errors += rqdpc_tmp; bytes += adapter->rx_ring[i].rx_stats.bytes; packets += adapter->rx_ring[i].rx_stats.packets; } - net_stats->rx_bytes = bytes; - net_stats->rx_packets = packets; + netdev->stats.rx_bytes = bytes; + netdev->stats.rx_packets = packets; bytes = 0; packets = 0; @@ -3828,8 +3799,8 @@ void igb_update_stats(struct igb_adapter *adapter) bytes += adapter->tx_ring[i].tx_stats.bytes; packets += adapter->tx_ring[i].tx_stats.packets; } - net_stats->tx_bytes = bytes; - net_stats->tx_packets = packets; + netdev->stats.tx_bytes = bytes; + netdev->stats.tx_packets = packets; /* read stats registers */ adapter->stats.crcerrs += rd32(E1000_CRCERRS); @@ -3866,7 +3837,7 @@ void igb_update_stats(struct igb_adapter *adapter) rd32(E1000_GOTCH); /* clear GOTCL */ rnbc = rd32(E1000_RNBC); adapter->stats.rnbc += rnbc; - net_stats->rx_fifo_errors += rnbc; + netdev->stats.rx_fifo_errors += rnbc; adapter->stats.ruc += rd32(E1000_RUC); adapter->stats.rfc += rd32(E1000_RFC); adapter->stats.rjc += rd32(E1000_RJC); @@ -3907,29 +3878,29 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); /* Fill out the OS statistics structure */ - net_stats->multicast = adapter->stats.mprc; - net_stats->collisions = adapter->stats.colc; + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; /* Rx Errors */ /* RLEC on some newer hardware can be incorrect so build * our own version based on RUC and ROC */ - net_stats->rx_errors = adapter->stats.rxerrc + + netdev->stats.rx_errors = adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; - net_stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; - net_stats->rx_crc_errors = adapter->stats.crcerrs; - net_stats->rx_frame_errors = adapter->stats.algnerrc; - net_stats->rx_missed_errors = adapter->stats.mpc; + netdev->stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - net_stats->tx_errors = 
adapter->stats.ecol + - adapter->stats.latecol; - net_stats->tx_aborted_errors = adapter->stats.ecol; - net_stats->tx_window_errors = adapter->stats.latecol; - net_stats->tx_carrier_errors = adapter->stats.tncrs; + netdev->stats.tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; /* Tx Dropped needs to be maintained elsewhere */ @@ -4952,7 +4923,6 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, struct sk_buff *skb; bool cleaned = false; int cleaned_count = 0; - int current_node = numa_node_id(); unsigned int total_bytes = 0, total_packets = 0; unsigned int i; u32 staterr; @@ -5007,8 +4977,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, buffer_info->page_offset, length); - if ((page_count(buffer_info->page) != 1) || - (page_to_nid(buffer_info->page) != current_node)) + if (page_count(buffer_info->page) != 1) buffer_info->page = NULL; else get_page(buffer_info->page); diff --git a/trunk/drivers/net/ipg.c b/trunk/drivers/net/ipg.c index ba8d246d05a0..63056e7b9e22 100644 --- a/trunk/drivers/net/ipg.c +++ b/trunk/drivers/net/ipg.c @@ -1751,7 +1751,7 @@ static int ipg_nic_open(struct net_device *dev) /* Register the interrupt line to be used by the IPG within * the Linux system. */ - rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED, + rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED, dev->name, dev); if (rc < 0) { printk(KERN_INFO "%s: Error when requesting interrupt.\n", diff --git a/trunk/drivers/net/irda/irda-usb.c b/trunk/drivers/net/irda/irda-usb.c index ae6eab3e5eed..215adf6377d0 100644 --- a/trunk/drivers/net/irda/irda-usb.c +++ b/trunk/drivers/net/irda/irda-usb.c @@ -852,7 +852,7 @@ static void irda_usb_receive(struct urb *urb) * hot unplug of the dongle... * Lowest effective timer is 10ms... * Jean II */ - self->rx_defer_timer.function = irda_usb_rx_defer_expired; + self->rx_defer_timer.function = &irda_usb_rx_defer_expired; self->rx_defer_timer.data = (unsigned long) urb; mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000)); return; diff --git a/trunk/drivers/net/ixgbe/ixgbe_main.c b/trunk/drivers/net/ixgbe/ixgbe_main.c index dceed80f16fb..448e84d56601 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ixgbe/ixgbe_main.c @@ -1204,7 +1204,6 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) adapter->link_check_timeout = jiffies; if (!test_bit(__IXGBE_DOWN, &adapter->state)) { IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); - IXGBE_WRITE_FLUSH(hw); schedule_work(&adapter->watchdog_task); } } @@ -1340,6 +1339,8 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) if (!q_vector->rxr_count) return IRQ_HANDLED; + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + rx_ring = &(adapter->rx_ring[r_idx]); /* disable interrupts on this vector only */ ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); napi_schedule(&q_vector->napi); @@ -3626,10 +3627,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) * It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for - * (roughly) the same number of vectors as there are CPU's. + * (roughly) twice the number of vectors as there are CPU's. 
*/ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, - (int)num_online_cpus()) + NON_Q_VECTORS; + (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; /* * At the same time, hardware can only support a maximum of @@ -5988,7 +5989,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) } else { pci_set_master(pdev); pci_restore_state(pdev); - pci_save_state(pdev); pci_wake_from_d3(pdev, false); diff --git a/trunk/drivers/net/macvlan.c b/trunk/drivers/net/macvlan.c index ae2b5c79c55e..d7dba3f6f763 100644 --- a/trunk/drivers/net/macvlan.c +++ b/trunk/drivers/net/macvlan.c @@ -38,27 +38,12 @@ struct macvlan_port { struct list_head vlans; }; -/** - * struct macvlan_rx_stats - MACVLAN percpu rx stats - * @rx_packets: number of received packets - * @rx_bytes: number of received bytes - * @multicast: number of received multicast packets - * @rx_errors: number of errors - */ -struct macvlan_rx_stats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long multicast; - unsigned long rx_errors; -}; - struct macvlan_dev { struct net_device *dev; struct list_head list; struct hlist_node hlist; struct macvlan_port *port; struct net_device *lowerdev; - struct macvlan_rx_stats *rx_stats; }; @@ -125,7 +110,6 @@ static void macvlan_broadcast(struct sk_buff *skb, struct net_device *dev; struct sk_buff *nskb; unsigned int i; - struct macvlan_rx_stats *rx_stats; if (skb->protocol == htons(ETH_P_PAUSE)) return; @@ -133,17 +117,17 @@ static void macvlan_broadcast(struct sk_buff *skb, for (i = 0; i < MACVLAN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { dev = vlan->dev; - rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id()); nskb = skb_clone(skb, GFP_ATOMIC); if (nskb == NULL) { - rx_stats->rx_errors++; + dev->stats.rx_errors++; + dev->stats.rx_dropped++; continue; } - rx_stats->rx_bytes += skb->len + ETH_HLEN; - rx_stats->rx_packets++; - rx_stats->multicast++; + dev->stats.rx_bytes += skb->len + ETH_HLEN; + dev->stats.rx_packets++; + dev->stats.multicast++; nskb->dev = dev; if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) @@ -163,7 +147,6 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) const struct macvlan_port *port; const struct macvlan_dev *vlan; struct net_device *dev; - struct macvlan_rx_stats *rx_stats; port = rcu_dereference(skb->dev->macvlan_port); if (port == NULL) @@ -183,15 +166,16 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) kfree_skb(skb); return NULL; } - rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id()); + skb = skb_share_check(skb, GFP_ATOMIC); if (skb == NULL) { - rx_stats->rx_errors++; + dev->stats.rx_errors++; + dev->stats.rx_dropped++; return NULL; } - rx_stats->rx_bytes += skb->len + ETH_HLEN; - rx_stats->rx_packets++; + dev->stats.rx_bytes += skb->len + ETH_HLEN; + dev->stats.rx_packets++; skb->dev = dev; skb->pkt_type = PACKET_HOST; @@ -218,7 +202,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, } else txq->tx_dropped++; - return ret; + return NETDEV_TX_OK; } static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, @@ -381,47 +365,9 @@ static int macvlan_init(struct net_device *dev) macvlan_set_lockdep_class(dev); - vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats); - if (!vlan->rx_stats) - return -ENOMEM; - return 0; } -static void macvlan_uninit(struct net_device *dev) -{ - struct macvlan_dev *vlan = netdev_priv(dev); - - free_percpu(vlan->rx_stats); -} - -static struct net_device_stats 
*macvlan_dev_get_stats(struct net_device *dev) -{ - struct net_device_stats *stats = &dev->stats; - struct macvlan_dev *vlan = netdev_priv(dev); - - dev_txq_stats_fold(dev, stats); - - if (vlan->rx_stats) { - struct macvlan_rx_stats *p, rx = {0}; - int i; - - for_each_possible_cpu(i) { - p = per_cpu_ptr(vlan->rx_stats, i); - rx.rx_packets += p->rx_packets; - rx.rx_bytes += p->rx_bytes; - rx.rx_errors += p->rx_errors; - rx.multicast += p->multicast; - } - stats->rx_packets = rx.rx_packets; - stats->rx_bytes = rx.rx_bytes; - stats->rx_errors = rx.rx_errors; - stats->rx_dropped = rx.rx_errors; - stats->multicast = rx.multicast; - } - return stats; -} - static void macvlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { @@ -458,7 +404,6 @@ static const struct ethtool_ops macvlan_ethtool_ops = { static const struct net_device_ops macvlan_netdev_ops = { .ndo_init = macvlan_init, - .ndo_uninit = macvlan_uninit, .ndo_open = macvlan_open, .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, @@ -466,7 +411,6 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, .ndo_set_multicast_list = macvlan_set_multicast_list, - .ndo_get_stats = macvlan_dev_get_stats, .ndo_validate_addr = eth_validate_addr, }; diff --git a/trunk/drivers/net/niu.c b/trunk/drivers/net/niu.c index 8ce58c4c7dd3..44558fcb56ac 100644 --- a/trunk/drivers/net/niu.c +++ b/trunk/drivers/net/niu.c @@ -8143,7 +8143,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np) int i; for (i = 0; i < len - 5; i++) { - if (!strncmp(s + i, "FCode ", 6)) + if (!strncmp(s + i, "FCode ", 5)) break; } if (i >= len - 5) diff --git a/trunk/drivers/net/pcmcia/fmvj18x_cs.c b/trunk/drivers/net/pcmcia/fmvj18x_cs.c index 57e09616330a..7e01fbdb87e0 100644 --- a/trunk/drivers/net/pcmcia/fmvj18x_cs.c +++ b/trunk/drivers/net/pcmcia/fmvj18x_cs.c @@ -264,7 +264,7 @@ static int fmvj18x_probe(struct pcmcia_device *link) /* Interrupt setup */ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; link->irq.IRQInfo1 = IRQ_LEVEL_ID; - link->irq.Handler = fjn_interrupt; + link->irq.Handler = &fjn_interrupt; link->irq.Instance = dev; /* General socket configuration */ diff --git a/trunk/drivers/net/pcmcia/nmclan_cs.c b/trunk/drivers/net/pcmcia/nmclan_cs.c index b12e69592d18..5ed6339c52bc 100644 --- a/trunk/drivers/net/pcmcia/nmclan_cs.c +++ b/trunk/drivers/net/pcmcia/nmclan_cs.c @@ -479,7 +479,7 @@ static int nmclan_probe(struct pcmcia_device *link) link->io.IOAddrLines = 5; link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; link->irq.IRQInfo1 = IRQ_LEVEL_ID; - link->irq.Handler = mace_interrupt; + link->irq.Handler = &mace_interrupt; link->irq.Instance = dev; link->conf.Attributes = CONF_ENABLE_IRQ; link->conf.IntType = INT_MEMORY_AND_IO; diff --git a/trunk/drivers/net/ppp_async.c b/trunk/drivers/net/ppp_async.c index c311fa6597f5..30b1b3326765 100644 --- a/trunk/drivers/net/ppp_async.c +++ b/trunk/drivers/net/ppp_async.c @@ -36,7 +36,7 @@ #define PPP_VERSION "2.4.2" -#define OBUFSIZE 4096 +#define OBUFSIZE 256 /* Structure for storing local state. 
*/ struct asyncppp { diff --git a/trunk/drivers/net/ppp_generic.c b/trunk/drivers/net/ppp_generic.c index 0a56a778af0a..9bf2a6be9031 100644 --- a/trunk/drivers/net/ppp_generic.c +++ b/trunk/drivers/net/ppp_generic.c @@ -184,7 +184,7 @@ static atomic_t ppp_unit_count = ATOMIC_INIT(0); static atomic_t channel_count = ATOMIC_INIT(0); /* per-net private data for this module */ -static int ppp_net_id __read_mostly; +static int ppp_net_id; struct ppp_net { /* units to ppp mapping */ struct idr units_idr; @@ -1944,15 +1944,8 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) } /* Pull completed packets off the queue and receive them. */ - while ((skb = ppp_mp_reconstruct(ppp))) { - if (pskb_may_pull(skb, 2)) - ppp_receive_nonmp_frame(ppp, skb); - else { - ++ppp->dev->stats.rx_length_errors; - kfree_skb(skb); - ppp_receive_error(ppp); - } - } + while ((skb = ppp_mp_reconstruct(ppp))) + ppp_receive_nonmp_frame(ppp, skb); return; diff --git a/trunk/drivers/net/pppoe.c b/trunk/drivers/net/pppoe.c index a1dcba255b06..60c8d233209f 100644 --- a/trunk/drivers/net/pppoe.c +++ b/trunk/drivers/net/pppoe.c @@ -97,7 +97,7 @@ static const struct proto_ops pppoe_ops; static struct ppp_channel_ops pppoe_chan_ops; /* per-net private data for this module */ -static int pppoe_net_id __read_mostly; +static int pppoe_net_id; struct pppoe_net { /* * we could use _single_ hash table for all diff --git a/trunk/drivers/net/pppol2tp.c b/trunk/drivers/net/pppol2tp.c index c58b50f8ba3b..849cc9c62c2a 100644 --- a/trunk/drivers/net/pppol2tp.c +++ b/trunk/drivers/net/pppol2tp.c @@ -232,7 +232,7 @@ static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; static const struct proto_ops pppol2tp_ops; /* per-net private data for this module */ -static int pppol2tp_net_id __read_mostly; +static int pppol2tp_net_id; struct pppol2tp_net { struct list_head pppol2tp_tunnel_list; rwlock_t pppol2tp_tunnel_list_lock; @@ -1537,7 +1537,7 @@ static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net, * if the tunnel socket goes away. */ tunnel->old_sk_destruct = sk->sk_destruct; - sk->sk_destruct = pppol2tp_tunnel_destruct; + sk->sk_destruct = &pppol2tp_tunnel_destruct; tunnel->sock = sk; sk->sk_allocation = GFP_ATOMIC; diff --git a/trunk/drivers/net/qlge/qlge.h b/trunk/drivers/net/qlge/qlge.h index 862c1aaf3860..1f59f054452d 100644 --- a/trunk/drivers/net/qlge/qlge.h +++ b/trunk/drivers/net/qlge/qlge.h @@ -16,7 +16,7 @@ */ #define DRV_NAME "qlge" #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "v1.00.00.23.00.00-01" +#define DRV_VERSION "v1.00.00-b3" #define PFX "qlge: " #define QPRINTK(qdev, nlevel, klevel, fmt, args...) 
\ diff --git a/trunk/drivers/net/qlge/qlge_main.c b/trunk/drivers/net/qlge/qlge_main.c index 7692299e7826..bd8e164b121c 100644 --- a/trunk/drivers/net/qlge/qlge_main.c +++ b/trunk/drivers/net/qlge/qlge_main.c @@ -69,9 +69,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define MSIX_IRQ 0 #define MSI_IRQ 1 #define LEG_IRQ 2 -static int qlge_irq_type = MSIX_IRQ; -module_param(qlge_irq_type, int, MSIX_IRQ); -MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); +static int irq_type = MSIX_IRQ; +module_param(irq_type, int, MSIX_IRQ); +MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); static struct pci_device_id qlge_pci_tbl[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, @@ -2870,7 +2870,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) int i, err; /* Get the MSIX vectors. */ - if (qlge_irq_type == MSIX_IRQ) { + if (irq_type == MSIX_IRQ) { /* Try to alloc space for the msix struct, * if it fails then go to MSI/legacy. */ @@ -2878,7 +2878,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) sizeof(struct msix_entry), GFP_KERNEL); if (!qdev->msi_x_entry) { - qlge_irq_type = MSI_IRQ; + irq_type = MSI_IRQ; goto msi; } @@ -2901,7 +2901,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) QPRINTK(qdev, IFUP, WARNING, "MSI-X Enable failed, trying MSI.\n"); qdev->intr_count = 1; - qlge_irq_type = MSI_IRQ; + irq_type = MSI_IRQ; } else if (err == 0) { set_bit(QL_MSIX_ENABLED, &qdev->flags); QPRINTK(qdev, IFUP, INFO, @@ -2912,7 +2912,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) } msi: qdev->intr_count = 1; - if (qlge_irq_type == MSI_IRQ) { + if (irq_type == MSI_IRQ) { if (!pci_enable_msi(qdev->pdev)) { set_bit(QL_MSI_ENABLED, &qdev->flags); QPRINTK(qdev, IFUP, INFO, @@ -2920,7 +2920,7 @@ static void ql_enable_msix(struct ql_adapter *qdev) return; } } - qlge_irq_type = LEG_IRQ; + irq_type = LEG_IRQ; QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); } @@ -3514,6 +3514,9 @@ int ql_wol(struct ql_adapter *qdev) } if (qdev->wol) { + /* Reroute all packets to Management Interface */ + ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM | + (MGMT_RCV_CFG_RM << 16))); wol |= MB_WOL_MODE_ON; status = ql_mb_wol_mode(qdev, wol); QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n", @@ -3714,10 +3717,6 @@ static int qlge_open(struct net_device *ndev) int err = 0; struct ql_adapter *qdev = netdev_priv(ndev); - err = ql_adapter_reset(qdev); - if (err) - return err; - err = ql_configure_rings(qdev); if (err) return err; @@ -3951,6 +3950,9 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) struct sockaddr *addr = p; int status; + if (netif_running(ndev)) + return -EBUSY; + if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); diff --git a/trunk/drivers/net/r6040.c b/trunk/drivers/net/r6040.c index 0f30ea4e97ec..7dfcb58b0eb4 100644 --- a/trunk/drivers/net/r6040.c +++ b/trunk/drivers/net/r6040.c @@ -842,7 +842,7 @@ static int r6040_open(struct net_device *dev) int ret; /* Request IRQ and Register interrupt handler */ - ret = request_irq(dev->irq, r6040_interrupt, + ret = request_irq(dev->irq, &r6040_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; @@ -1085,7 +1085,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, int bar = 0; u16 *adrp; - printk("%s\n", version); + printk(KERN_INFO "%s\n", version); err = pci_enable_device(pdev); if (err) diff --git a/trunk/drivers/net/r8169.c 
b/trunk/drivers/net/r8169.c index 98f6c51b7608..1b0aa4cf89bc 100644 --- a/trunk/drivers/net/r8169.c +++ b/trunk/drivers/net/r8169.c @@ -3243,9 +3243,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, struct net_device *dev) { - unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + unsigned int mtu = dev->mtu; - tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; + tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; } static int rtl8169_open(struct net_device *dev) diff --git a/trunk/drivers/net/s2io.c b/trunk/drivers/net/s2io.c index 0dd7839322bc..ddccf5fa56b6 100644 --- a/trunk/drivers/net/s2io.c +++ b/trunk/drivers/net/s2io.c @@ -3494,7 +3494,6 @@ static void s2io_reset(struct s2io_nic *sp) /* Restore the PCI state saved during initialization. */ pci_restore_state(sp->pdev); - pci_save_state(sp->pdev); pci_read_config_word(sp->pdev, 0x2, &val16); if (check_pci_device_id(val16) != (u16)PCI_ANY_ID) break; diff --git a/trunk/drivers/net/smsc911x.c b/trunk/drivers/net/smsc911x.c index 7f01e60d5172..6a9f51d1d9f2 100644 --- a/trunk/drivers/net/smsc911x.c +++ b/trunk/drivers/net/smsc911x.c @@ -986,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) struct net_device *dev = pdata->dev; int npackets = 0; - while (npackets < budget) { + while (likely(netif_running(dev)) && (npackets < budget)) { unsigned int pktlength; unsigned int pktwords; struct sk_buff *skb; diff --git a/trunk/drivers/net/smsc9420.c b/trunk/drivers/net/smsc9420.c index 92e2bbe6b49b..b4909a2dec66 100644 --- a/trunk/drivers/net/smsc9420.c +++ b/trunk/drivers/net/smsc9420.c @@ -1161,7 +1161,7 @@ static int smsc9420_mii_probe(struct net_device *dev) phydev->phy_id); phydev = phy_connect(dev, dev_name(&phydev->dev), - smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); + &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { pr_err("%s: Could not attach to PHY\n", dev->name); diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index 6e6db955b4a9..47a4f0947872 100644 --- a/trunk/drivers/net/tg3.c +++ b/trunk/drivers/net/tg3.c @@ -68,8 +68,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.104" -#define DRV_MODULE_RELDATE "November 13, 2009" +#define DRV_MODULE_VERSION "3.103" +#define DRV_MODULE_RELDATE "November 2, 2009" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -137,12 +137,6 @@ #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) -#define TG3_RX_STD_BUFF_RING_SIZE \ - (sizeof(struct ring_info) * TG3_RX_RING_SIZE) - -#define TG3_RX_JMB_BUFF_RING_SIZE \ - (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) - /* minimum number of free TX descriptors required to wake up TX process */ #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) @@ -241,9 +235,6 @@ static struct pci_device_id tg3_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, - {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 
PCI_DEVICE_ID_SYSKONNECT_9MXX)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -405,7 +396,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) TG3_64BIT_REG_LOW, val); return; } - if (off == TG3_RX_STD_PROD_IDX_REG) { + if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + TG3_64BIT_REG_LOW, val); return; @@ -2258,7 +2249,7 @@ static void tg3_nvram_unlock(struct tg3 *tp) static void tg3_enable_nvram_access(struct tg3 *tp) { if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { u32 nvaccess = tr32(NVRAM_ACCESS); tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); @@ -2269,7 +2260,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp) static void tg3_disable_nvram_access(struct tg3 *tp) { if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { + !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { u32 nvaccess = tr32(NVRAM_ACCESS); tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); @@ -4406,17 +4397,6 @@ static void tg3_tx(struct tg3_napi *tnapi) } } -static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) -{ - if (!ri->skb) - return; - - pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), - map_sz, PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(ri->skb); - ri->skb = NULL; -} - /* Returns size of skb allocated or < 0 on error. * * We only need to fill in the address because the other members @@ -4428,14 +4408,16 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) * buffers the cpu only reads the last cacheline of the RX descriptor * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */ -static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, - u32 opaque_key, u32 dest_idx_unmasked) +static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, + int src_idx, u32 dest_idx_unmasked) { + struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *desc; struct ring_info *map, *src_map; struct sk_buff *skb; dma_addr_t mapping; int skb_size, dest_idx; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; src_map = NULL; switch (opaque_key) { @@ -4443,6 +4425,8 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; desc = &tpr->rx_std[dest_idx]; map = &tpr->rx_std_buffers[dest_idx]; + if (src_idx >= 0) + src_map = &tpr->rx_std_buffers[src_idx]; skb_size = tp->rx_pkt_map_sz; break; @@ -4450,6 +4434,8 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; desc = &tpr->rx_jmb[dest_idx].std; map = &tpr->rx_jmb_buffers[dest_idx]; + if (src_idx >= 0) + src_map = &tpr->rx_jmb_buffers[src_idx]; skb_size = TG3_RX_JMB_MAP_SZ; break; @@ -4479,6 +4465,9 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, map->skb = skb; pci_unmap_addr_set(map, mapping, mapping); + if (src_map != NULL) + src_map->skb = NULL; + desc->addr_hi = ((u64)mapping >> 32); desc->addr_lo = ((u64)mapping & 0xffffffff); @@ -4489,32 +4478,30 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, * members of the RX descriptor are invariant. See notes above * tg3_alloc_rx_skb for full details. 
*/ -static void tg3_recycle_rx(struct tg3_napi *tnapi, - struct tg3_rx_prodring_set *dpr, - u32 opaque_key, int src_idx, - u32 dest_idx_unmasked) +static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, + int src_idx, u32 dest_idx_unmasked) { struct tg3 *tp = tnapi->tp; struct tg3_rx_buffer_desc *src_desc, *dest_desc; struct ring_info *src_map, *dest_map; int dest_idx; - struct tg3_rx_prodring_set *spr = &tp->prodring[0]; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; switch (opaque_key) { case RXD_OPAQUE_RING_STD: dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; - dest_desc = &dpr->rx_std[dest_idx]; - dest_map = &dpr->rx_std_buffers[dest_idx]; - src_desc = &spr->rx_std[src_idx]; - src_map = &spr->rx_std_buffers[src_idx]; + dest_desc = &tpr->rx_std[dest_idx]; + dest_map = &tpr->rx_std_buffers[dest_idx]; + src_desc = &tpr->rx_std[src_idx]; + src_map = &tpr->rx_std_buffers[src_idx]; break; case RXD_OPAQUE_RING_JUMBO: dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; - dest_desc = &dpr->rx_jmb[dest_idx].std; - dest_map = &dpr->rx_jmb_buffers[dest_idx]; - src_desc = &spr->rx_jmb[src_idx].std; - src_map = &spr->rx_jmb_buffers[src_idx]; + dest_desc = &tpr->rx_jmb[dest_idx].std; + dest_map = &tpr->rx_jmb_buffers[dest_idx]; + src_desc = &tpr->rx_jmb[src_idx].std; + src_map = &tpr->rx_jmb_buffers[src_idx]; break; default: @@ -4526,6 +4513,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, pci_unmap_addr(src_map, mapping)); dest_desc->addr_hi = src_desc->addr_hi; dest_desc->addr_lo = src_desc->addr_lo; + src_map->skb = NULL; } @@ -4557,11 +4545,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) { struct tg3 *tp = tnapi->tp; u32 work_mask, rx_std_posted = 0; - u32 std_prod_idx, jmb_prod_idx; u32 sw_idx = tnapi->rx_rcb_ptr; u16 hw_idx; int received; - struct tg3_rx_prodring_set *tpr = tnapi->prodring; + struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; hw_idx = *(tnapi->rx_rcb_prod_idx); /* @@ -4571,10 +4558,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) rmb(); work_mask = 0; received = 0; - std_prod_idx = tpr->rx_std_prod_idx; - jmb_prod_idx = tpr->rx_jmb_prod_idx; while (sw_idx != hw_idx && budget > 0) { - struct ring_info *ri; struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; unsigned int len; struct sk_buff *skb; @@ -4584,16 +4568,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; if (opaque_key == RXD_OPAQUE_RING_STD) { - ri = &tp->prodring[0].rx_std_buffers[desc_idx]; + struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; dma_addr = pci_unmap_addr(ri, mapping); skb = ri->skb; - post_ptr = &std_prod_idx; + post_ptr = &tpr->rx_std_ptr; rx_std_posted++; } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; + struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; dma_addr = pci_unmap_addr(ri, mapping); skb = ri->skb; - post_ptr = &jmb_prod_idx; + post_ptr = &tpr->rx_jmb_ptr; } else goto next_pkt_nopost; @@ -4602,7 +4586,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if ((desc->err_vlan & RXD_ERR_MASK) != 0 && (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { drop_it: - tg3_recycle_rx(tnapi, tpr, opaque_key, + tg3_recycle_rx(tnapi, opaque_key, desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card. 
*/ @@ -4622,13 +4606,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) ) { int skb_size; - skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, - *post_ptr); + skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, + desc_idx, *post_ptr); if (skb_size < 0) goto drop_it; - ri->skb = NULL; - pci_unmap_single(tp->pdev, dma_addr, skb_size, PCI_DMA_FROMDEVICE); @@ -4636,7 +4618,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) } else { struct sk_buff *copy_skb; - tg3_recycle_rx(tnapi, tpr, opaque_key, + tg3_recycle_rx(tnapi, opaque_key, desc_idx, *post_ptr); copy_skb = netdev_alloc_skb(tp->dev, @@ -4687,7 +4669,9 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { u32 idx = *post_ptr % TG3_RX_RING_SIZE; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx); + + tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + + TG3_64BIT_REG_LOW, idx); work_mask &= ~RXD_OPAQUE_RING_STD; rx_std_posted = 0; } @@ -4707,45 +4691,33 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) tw32_rx_mbox(tnapi->consmbox, sw_idx); /* Refill RX ring(s). */ - if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) { - if (work_mask & RXD_OPAQUE_RING_STD) { - tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, - tpr->rx_std_prod_idx); - } - if (work_mask & RXD_OPAQUE_RING_JUMBO) { - tpr->rx_jmb_prod_idx = jmb_prod_idx % - TG3_RX_JUMBO_RING_SIZE; - tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, - tpr->rx_jmb_prod_idx); - } - mmiowb(); - } else if (work_mask) { - /* rx_std_buffers[] and rx_jmb_buffers[] entries must be - * updated before the producer indices can be updated. - */ - smp_wmb(); - - tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; - tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; - - napi_schedule(&tp->napi[1].napi); + if (work_mask & RXD_OPAQUE_RING_STD) { + sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; + tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, + sw_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; + tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, + sw_idx); } + mmiowb(); return received; } -static void tg3_poll_link(struct tg3 *tp) +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) { + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + /* handle link change and other phy events */ if (!(tp->tg3_flags & (TG3_FLAG_USE_LINKCHG_REG | TG3_FLAG_POLL_SERDES))) { - struct tg3_hw_status *sblk = tp->napi[0].hw_status; - if (sblk->status & SD_STATUS_LINK_CHG) { sblk->status = SD_STATUS_UPDATED | - (sblk->status & ~SD_STATUS_LINK_CHG); + (sblk->status & ~SD_STATUS_LINK_CHG); spin_lock(&tp->lock); if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { tw32_f(MAC_STATUS, @@ -4759,98 +4731,6 @@ static void tg3_poll_link(struct tg3 *tp) spin_unlock(&tp->lock); } } -} - -static void tg3_rx_prodring_xfer(struct tg3 *tp, - struct tg3_rx_prodring_set *dpr, - struct tg3_rx_prodring_set *spr) -{ - u32 si, di, cpycnt, src_prod_idx; - int i; - - while (1) { - src_prod_idx = spr->rx_std_prod_idx; - - /* Make sure updates to the rx_std_buffers[] entries and the - * standard producer index are seen in the correct order. 
- */ - smp_rmb(); - - if (spr->rx_std_cons_idx == src_prod_idx) - break; - - if (spr->rx_std_cons_idx < src_prod_idx) - cpycnt = src_prod_idx - spr->rx_std_cons_idx; - else - cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; - - cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); - - si = spr->rx_std_cons_idx; - di = dpr->rx_std_prod_idx; - - memcpy(&dpr->rx_std_buffers[di], - &spr->rx_std_buffers[si], - cpycnt * sizeof(struct ring_info)); - - for (i = 0; i < cpycnt; i++, di++, si++) { - struct tg3_rx_buffer_desc *sbd, *dbd; - sbd = &spr->rx_std[si]; - dbd = &dpr->rx_std[di]; - dbd->addr_hi = sbd->addr_hi; - dbd->addr_lo = sbd->addr_lo; - } - - spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % - TG3_RX_RING_SIZE; - dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % - TG3_RX_RING_SIZE; - } - - while (1) { - src_prod_idx = spr->rx_jmb_prod_idx; - - /* Make sure updates to the rx_jmb_buffers[] entries and - * the jumbo producer index are seen in the correct order. - */ - smp_rmb(); - - if (spr->rx_jmb_cons_idx == src_prod_idx) - break; - - if (spr->rx_jmb_cons_idx < src_prod_idx) - cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; - else - cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; - - cpycnt = min(cpycnt, - TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); - - si = spr->rx_jmb_cons_idx; - di = dpr->rx_jmb_prod_idx; - - memcpy(&dpr->rx_jmb_buffers[di], - &spr->rx_jmb_buffers[si], - cpycnt * sizeof(struct ring_info)); - - for (i = 0; i < cpycnt; i++, di++, si++) { - struct tg3_rx_buffer_desc *sbd, *dbd; - sbd = &spr->rx_jmb[si].std; - dbd = &dpr->rx_jmb[di].std; - dbd->addr_hi = sbd->addr_hi; - dbd->addr_lo = sbd->addr_lo; - } - - spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % - TG3_RX_JUMBO_RING_SIZE; - dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % - TG3_RX_JUMBO_RING_SIZE; - } -} - -static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) -{ - struct tg3 *tp = tnapi->tp; /* run TX completion thread */ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { @@ -4866,74 +4746,6 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) work_done += tg3_rx(tnapi, budget - work_done); - if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { - int i; - u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx; - u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx; - - for (i = 2; i < tp->irq_cnt; i++) - tg3_rx_prodring_xfer(tp, tnapi->prodring, - tp->napi[i].prodring); - - wmb(); - - if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) { - u32 mbox = TG3_RX_STD_PROD_IDX_REG; - tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx); - } - - if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) { - u32 mbox = TG3_RX_JMB_PROD_IDX_REG; - tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx); - } - - mmiowb(); - } - - return work_done; -} - -static int tg3_poll_msix(struct napi_struct *napi, int budget) -{ - struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); - struct tg3 *tp = tnapi->tp; - int work_done = 0; - struct tg3_hw_status *sblk = tnapi->hw_status; - - while (1) { - work_done = tg3_poll_work(tnapi, work_done, budget); - - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) - goto tx_recovery; - - if (unlikely(work_done >= budget)) - break; - - /* tp->last_tag is used in tg3_restart_ints() below - * to tell the hw how much work has been processed, - * so we must read it before checking for more work. 
- */ - tnapi->last_tag = sblk->status_tag; - tnapi->last_irq_tag = tnapi->last_tag; - rmb(); - - /* check for RX/TX work to do */ - if (sblk->idx[0].tx_consumer == tnapi->tx_cons && - *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { - napi_complete(napi); - /* Reenable interrupts. */ - tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); - mmiowb(); - break; - } - } - - return work_done; - -tx_recovery: - /* work_done is guaranteed to be less than budget. */ - napi_complete(napi); - schedule_work(&tp->reset_task); return work_done; } @@ -4945,8 +4757,6 @@ static int tg3_poll(struct napi_struct *napi, int budget) struct tg3_hw_status *sblk = tnapi->hw_status; while (1) { - tg3_poll_link(tp); - work_done = tg3_poll_work(tnapi, work_done, budget); if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) @@ -5309,11 +5119,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); /* Workaround 4GB and 40-bit hardware DMA bugs. */ -static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - struct sk_buff *skb, u32 last_plus_one, - u32 *start, u32 base_flags, u32 mss) +static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, + u32 last_plus_one, u32 *start, + u32 base_flags, u32 mss) { - struct tg3 *tp = tnapi->tp; + struct tg3_napi *tnapi = &tp->napi[0]; struct sk_buff *new_skb; dma_addr_t new_addr = 0; u32 entry = *start; @@ -5396,7 +5206,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry, } /* hard_start_xmit for devices that don't have any bugs and - * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. + * support TG3_FLG2_HW_TSO_2 only. */ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -5455,7 +5265,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, hdrlen = ip_tcp_len + tcp_opt_len; } - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { mss |= (hdrlen & 0xc) << 12; if (hdrlen & 0x10) base_flags |= 0x00000010; @@ -5582,13 +5392,9 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, struct skb_shared_info *sp; int would_hit_hwbug; dma_addr_t mapping; - struct tg3_napi *tnapi; - struct netdev_queue *txq; + struct tg3_napi *tnapi = &tp->napi[0]; - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - tnapi = &tp->napi[skb_get_queue_mapping(skb)]; - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) - tnapi++; + len = skb_headlen(skb); /* We are running in BH disabled context with netif_tx_lock * and TX reclaim runs via tp->napi.poll inside of a software @@ -5596,8 +5402,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, * no IRQ context deadlocks to worry about either. Rejoice! */ if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); /* This is a hard error, log it. */ printk(KERN_ERR PFX "%s: BUG! 
Tx Ring full when " @@ -5610,7 +5416,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, base_flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) base_flags |= TXD_FLAG_TCPUDP_CSUM; - + mss = 0; if ((mss = skb_shinfo(skb)->gso_size) != 0) { struct iphdr *iph; u32 tcp_opt_len, ip_tcp_len, hdr_len; @@ -5644,12 +5450,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, IPPROTO_TCP, 0); - if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { - mss |= (hdr_len & 0xc) << 12; - if (hdr_len & 0x10) - base_flags |= 0x00000010; - base_flags |= (hdr_len & 0x3e0) << 5; - } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) + if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) mss |= hdr_len << 9; else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { @@ -5674,10 +5475,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, (vlan_tx_tag_get(skb) << 16)); #endif - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && - !mss && skb->len > ETH_DATA_LEN) - base_flags |= TXD_FLAG_JMB_PKT; - if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { dev_kfree_skb(skb); goto out_unlock; @@ -5691,8 +5488,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, would_hit_hwbug = 0; - len = skb_headlen(skb); - if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) would_hit_hwbug = 1; @@ -5758,7 +5553,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, /* If the workaround fails due to memory/mapping * failure, silently drop this packet. */ - if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, + if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, &start, base_flags, mss)) goto out_unlock; @@ -5766,13 +5561,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, } /* Packets are ready, update Tx producer idx local and on card. 
*/ - tw32_tx_mbox(tnapi->prodmbox, entry); + tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); tnapi->tx_prod = entry; if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { - netif_tx_stop_queue(txq); + netif_stop_queue(dev); if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) - netif_tx_wake_queue(txq); + netif_wake_queue(tp->dev); } out_unlock: @@ -5843,33 +5638,36 @@ static void tg3_rx_prodring_free(struct tg3 *tp, struct tg3_rx_prodring_set *tpr) { int i; + struct ring_info *rxp; - if (tpr != &tp->prodring[0]) { - for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; - i = (i + 1) % TG3_RX_RING_SIZE) - tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], - tp->rx_pkt_map_sz); - - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - for (i = tpr->rx_jmb_cons_idx; - i != tpr->rx_jmb_prod_idx; - i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { - tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], - TG3_RX_JMB_MAP_SZ); - } - } + for (i = 0; i < TG3_RX_RING_SIZE; i++) { + rxp = &tpr->rx_std_buffers[i]; - return; - } + if (rxp->skb == NULL) + continue; - for (i = 0; i < TG3_RX_RING_SIZE; i++) - tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], - tp->rx_pkt_map_sz); + pci_unmap_single(tp->pdev, + pci_unmap_addr(rxp, mapping), + tp->rx_pkt_map_sz, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(rxp->skb); + rxp->skb = NULL; + } if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) - tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], - TG3_RX_JMB_MAP_SZ); + for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { + rxp = &tpr->rx_jmb_buffers[i]; + + if (rxp->skb == NULL) + continue; + + pci_unmap_single(tp->pdev, + pci_unmap_addr(rxp, mapping), + TG3_RX_JMB_MAP_SZ, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(rxp->skb); + rxp->skb = NULL; + } } } @@ -5884,19 +5682,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, struct tg3_rx_prodring_set *tpr) { u32 i, rx_pkt_dma_sz; - - tpr->rx_std_cons_idx = 0; - tpr->rx_std_prod_idx = 0; - tpr->rx_jmb_cons_idx = 0; - tpr->rx_jmb_prod_idx = 0; - - if (tpr != &tp->prodring[0]) { - memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); - if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) - memset(&tpr->rx_jmb_buffers[0], 0, - TG3_RX_JMB_BUFF_RING_SIZE); - goto done; - } + struct tg3_napi *tnapi = &tp->napi[0]; /* Zero out all descriptors. */ memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); @@ -5923,7 +5709,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, /* Now allocate fresh SKBs for each rx ring. 
*/ for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { printk(KERN_WARNING PFX "%s: Using a smaller RX standard ring, " "only %d out of %d buffers were allocated " @@ -5954,8 +5740,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, } for (i = 0; i < tp->rx_jumbo_pending; i++) { - if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, - i) < 0) { + if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, + -1, i) < 0) { printk(KERN_WARNING PFX "%s: Using a smaller RX jumbo ring, " "only %d out of %d buffers were " @@ -5999,7 +5785,8 @@ static void tg3_rx_prodring_fini(struct tg3 *tp, static int tg3_rx_prodring_init(struct tg3 *tp, struct tg3_rx_prodring_set *tpr) { - tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); + tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * + TG3_RX_RING_SIZE, GFP_KERNEL); if (!tpr->rx_std_buffers) return -ENOMEM; @@ -6009,7 +5796,8 @@ static int tg3_rx_prodring_init(struct tg3 *tp, goto err_out; if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { - tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, + tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * + TG3_RX_JUMBO_RING_SIZE, GFP_KERNEL); if (!tpr->rx_jmb_buffers) goto err_out; @@ -6065,10 +5853,9 @@ static void tg3_free_rings(struct tg3 *tp) dev_kfree_skb_any(skb); } - - if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1) - tg3_rx_prodring_free(tp, &tp->prodring[j]); } + + tg3_rx_prodring_free(tp, &tp->prodring[0]); } /* Initialize tx/rx rings for packet processing. @@ -6102,13 +5889,9 @@ static int tg3_init_rings(struct tg3 *tp) tnapi->rx_rcb_ptr = 0; if (tnapi->rx_rcb) memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - - if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) && - tg3_rx_prodring_alloc(tp, &tp->prodring[i])) - return -ENOMEM; } - return 0; + return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); } /* @@ -6152,8 +5935,7 @@ static void tg3_free_consistent(struct tg3 *tp) tp->hw_stats = NULL; } - for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) - tg3_rx_prodring_fini(tp, &tp->prodring[i]); + tg3_rx_prodring_fini(tp, &tp->prodring[0]); } /* @@ -6164,10 +5946,8 @@ static int tg3_alloc_consistent(struct tg3 *tp) { int i; - for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) { - if (tg3_rx_prodring_init(tp, &tp->prodring[i])) - goto err_out; - } + if (tg3_rx_prodring_init(tp, &tp->prodring[0])) + return -ENOMEM; tp->hw_stats = pci_alloc_consistent(tp->pdev, sizeof(struct tg3_hw_stats), @@ -6211,11 +5991,6 @@ static int tg3_alloc_consistent(struct tg3 *tp) break; } - if (tp->irq_cnt == 1) - tnapi->prodring = &tp->prodring[0]; - else if (i) - tnapi->prodring = &tp->prodring[i - 1]; - /* * If multivector RSS is enabled, vector 0 does not handle * rx or tx interrupts. Don't allocate any resources for it. @@ -7504,12 +7279,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - val = tr32(TG3PCI_DMA_RW_CTRL) & - ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; - tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { /* This value is determined during the probe time DMA * engine test, tg3_test_dma. 
*/ @@ -7632,9 +7404,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) ((u64) tpr->rx_std_mapping >> 32)); tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tpr->rx_std_mapping & 0xffffffff)); - if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) - tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, - NIC_SRAM_RX_BUFFER_DESC); + tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_BUFFER_DESC); /* Disable the mini ring */ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) @@ -7657,9 +7428,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | BDINFO_FLAGS_USE_EXT_RECV); - if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) - tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, - NIC_SRAM_RX_JUMBO_BUFFER_DESC); + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_JUMBO_BUFFER_DESC); } else { tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); @@ -7675,12 +7445,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); - tpr->rx_std_prod_idx = tp->rx_pending; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); + tpr->rx_std_ptr = tp->rx_pending; + tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, + tpr->rx_std_ptr); - tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? + tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; - tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); + tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, + tpr->rx_jmb_ptr); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { tw32(STD_REPLENISH_LWM, 32); @@ -7743,8 +7515,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; @@ -9734,16 +9505,15 @@ static int tg3_set_tso(struct net_device *dev, u32 value) return 0; } if ((dev->features & NETIF_F_IPV6_CSUM) && - ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || - (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { + (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { if (value) { dev->features |= NETIF_F_TSO6; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) dev->features |= NETIF_F_TSO_ECN; } else dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); @@ -11192,7 +10962,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: @@ -11233,7 +11003,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) { - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + 
tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; protect = 1; } @@ -11327,7 +11097,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) /* NVRAM protection for TPM */ if (nvcfg1 & (1 << 27)) { - tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; + tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; protect = 1; } @@ -11829,7 +11599,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) tg3_enable_nvram_access(tp); if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && - !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) + !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) tw32(NVRAM_WRITE1, 0x406); grc_mode = tr32(GRC_MODE); @@ -12705,9 +12475,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { u32 prod_id_asic_rev; - if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || - tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S) pci_read_config_dword(tp->pdev, TG3PCI_GEN2_PRODID_ASICREV, &prod_id_asic_rev); @@ -12890,29 +12661,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->dev->features |= NETIF_F_IPV6_CSUM; } - /* Determine TSO capabilities */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; - else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; - else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { - tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && - tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) - tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; - } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { - tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) - tp->fw_needed = FIRMWARE_TG3TSO5; - else - tp->fw_needed = FIRMWARE_TG3TSO; - } - - tp->irq_max = 1; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || @@ -12924,21 +12672,31 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; + } else { + tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5750 && + tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) + tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; } + } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; - tp->irq_max = TG3_IRQ_MAX_VECS; - } + tp->irq_max = 1; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; + tp->irq_max = TG3_IRQ_MAX_VECS; } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; - else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { - tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; - tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; + if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { + 
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; + else { + tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; + tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; + } } if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || @@ -13539,11 +13297,6 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) #endif #endif - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { - val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; - goto out; - } - if (!goal) goto out; @@ -13738,7 +13491,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) { dma_addr_t buf_dma; u32 *buf, saved_dma_rwctrl; - int ret = 0; + int ret; buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); if (!buf) { @@ -13751,9 +13504,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp) tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) - goto out; - if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { /* DMA read watermark not used on PCIE */ tp->dma_rwctrl |= 0x00180000; @@ -13826,6 +13576,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) tg3_switch_clocks(tp); #endif + ret = 0; if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) goto out; @@ -14004,7 +13755,6 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) case PHY_ID_BCM5756: return "5722/5756"; case PHY_ID_BCM5906: return "5906"; case PHY_ID_BCM5761: return "5761"; - case PHY_ID_BCM5717: return "5717"; case PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -14246,6 +13996,51 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->rx_pending = TG3_DEF_RX_RING_PENDING; tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->tp = tp; + tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; + + tnapi->int_mbox = intmbx; + if (i < 4) + intmbx += 0x8; + else + intmbx += 0x4; + + tnapi->consmbox = rcvmbx; + tnapi->prodmbox = sndmbx; + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else + tnapi->coal_now = HOSTCC_MODE_NOW; + + if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) + break; + + /* + * If we support MSIX, we'll be using RSS. If we're using + * RSS, the first vector only handles link interrupts and the + * remaining vectors handle rx and tx interrupts. Reuse the + * mailbox values for the next iteration. The values we setup + * above are still useful for the single vectored mode. 
+ */ + if (!i) + continue; + + rcvmbx += 0x8; + + if (sndmbx & 0x4) + sndmbx -= 0x4; + else + sndmbx += 0xc; + } + + netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64); dev->ethtool_ops = &tg3_ethtool_ops; dev->watchdog_timeo = TG3_TX_TIMEOUT; dev->irq = pdev->irq; @@ -14257,8 +14052,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_iounmap; } - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && - tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) + if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) dev->netdev_ops = &tg3_netdev_ops; else dev->netdev_ops = &tg3_netdev_ops_dma_bug; @@ -14305,39 +14099,46 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tg3_init_bufmgr_config(tp); - /* Selectively allow TSO based on operating conditions */ - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || - (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) - tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; - else { - tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); - tp->fw_needed = NULL; - } - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) tp->fw_needed = FIRMWARE_TG3; + if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { + tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; + } + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || + tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || + (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { + tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; + } else { + tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) + tp->fw_needed = FIRMWARE_TG3TSO5; + else + tp->fw_needed = FIRMWARE_TG3TSO; + } + /* TSO is on by default on chips that support hardware TSO. * Firmware TSO on older chips gives lower performance, so it * is off by default, but can be enabled using ethtool. 
*/ - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && - (dev->features & NETIF_F_IP_CSUM)) - dev->features |= NETIF_F_TSO; - - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || - (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { - if (dev->features & NETIF_F_IPV6_CSUM) + if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { + if (dev->features & NETIF_F_IP_CSUM) + dev->features |= NETIF_F_TSO; + if ((dev->features & NETIF_F_IPV6_CSUM) && + (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) dev->features |= NETIF_F_TSO6; - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) dev->features |= NETIF_F_TSO_ECN; } + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { @@ -14388,53 +14189,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; - intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; - rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; - sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; - for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; - - tnapi->tp = tp; - tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; - - tnapi->int_mbox = intmbx; - if (i < 4) - intmbx += 0x8; - else - intmbx += 0x4; - - tnapi->consmbox = rcvmbx; - tnapi->prodmbox = sndmbx; - - if (i) { - tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); - netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); - } else { - tnapi->coal_now = HOSTCC_MODE_NOW; - netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); - } - - if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) - break; - - /* - * If we support MSIX, we'll be using RSS. If we're using - * RSS, the first vector only handles link interrupts and the - * remaining vectors handle rx and tx interrupts. Reuse the - * mailbox values for the next iteration. The values we setup - * above are still useful for the single vectored mode. 
- */ - if (!i) - continue; - - rcvmbx += 0x8; - - if (sndmbx & 0x4) - sndmbx -= 0x4; - else - sndmbx += 0xc; - } - tg3_init_coal(tp); pci_set_drvdata(pdev, dev); diff --git a/trunk/drivers/net/tg3.h b/trunk/drivers/net/tg3.h index 453a34fb72b9..d770da124b85 100644 --- a/trunk/drivers/net/tg3.h +++ b/trunk/drivers/net/tg3.h @@ -46,9 +46,10 @@ #define TG3PCI_DEVICE_TIGON3_57788 0x1691 #define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ #define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ -#define TG3PCI_DEVICE_TIGON3_5717 0x1655 -#define TG3PCI_DEVICE_TIGON3_5718 0x1656 -#define TG3PCI_DEVICE_TIGON3_5724 0x165c +#define TG3PCI_DEVICE_TIGON3_5717C 0x1655 +#define TG3PCI_DEVICE_TIGON3_5717S 0x1656 +#define TG3PCI_DEVICE_TIGON3_5718C 0x1665 +#define TG3PCI_DEVICE_TIGON3_5718S 0x1666 /* 0x04 --> 0x64 unused */ #define TG3PCI_MSI_DATA 0x00000064 /* 0x66 --> 0x68 unused */ @@ -102,7 +103,6 @@ #define CHIPREV_ID_5906_A1 0xc001 #define CHIPREV_ID_57780_A0 0x57780000 #define CHIPREV_ID_57780_A1 0x57780001 -#define CHIPREV_ID_5717_A0 0x05717000 #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 @@ -141,7 +141,8 @@ #define METAL_REV_B1 0x01 #define METAL_REV_B2 0x02 #define TG3PCI_DMA_RW_CTRL 0x0000006c -#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 +#define DMA_RWCTRL_MIN_DMA 0x000000ff +#define DMA_RWCTRL_MIN_DMA_SHIFT 0 #define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 #define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 #define DMA_RWCTRL_READ_BNDRY_16 0x00000100 @@ -241,11 +242,7 @@ #define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ #define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ #define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ -#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \ - TG3_64BIT_REG_LOW) #define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ -#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \ - TG3_64BIT_REG_LOW) #define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ #define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ #define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ @@ -2573,10 +2570,8 @@ struct tg3_ethtool_stats { }; struct tg3_rx_prodring_set { - u32 rx_std_prod_idx; - u32 rx_std_cons_idx; - u32 rx_jmb_prod_idx; - u32 rx_jmb_cons_idx; + u32 rx_std_ptr; + u32 rx_jmb_ptr; struct tg3_rx_buffer_desc *rx_std; struct tg3_ext_rx_buffer_desc *rx_jmb; struct ring_info *rx_std_buffers; @@ -2604,7 +2599,6 @@ struct tg3_napi { u32 consmbox; u32 rx_rcb_ptr; u16 *rx_rcb_prod_idx; - struct tg3_rx_prodring_set *prodring; struct tg3_rx_buffer_desc *rx_rcb; struct tg3_tx_buffer_desc *tx_ring; @@ -2688,7 +2682,7 @@ struct tg3 { struct vlan_group *vlgrp; #endif - struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1]; + struct tg3_rx_prodring_set prodring[1]; /* begin "everything else" cacheline(s) section */ @@ -2759,7 +2753,7 @@ struct tg3 { #define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 #define TG3_FLG2_5705_PLUS 0x00040000 #define TG3_FLG2_5750_PLUS 0x00080000 -#define TG3_FLG2_HW_TSO_3 0x00100000 +#define TG3_FLG2_PROTECTED_NVRAM 0x00100000 #define TG3_FLG2_USING_MSI 0x00200000 #define TG3_FLG2_USING_MSIX 0x00400000 #define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ @@ -2771,9 +2765,7 @@ struct tg3 { #define TG3_FLG2_ICH_WORKAROUND 0x02000000 #define TG3_FLG2_5780_CLASS 0x04000000 #define TG3_FLG2_HW_TSO_2 0x08000000 -#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \ - TG3_FLG2_HW_TSO_2 | \ - TG3_FLG2_HW_TSO_3) +#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 
#define TG3_FLG2_1SHOT_MSI 0x10000000 #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 #define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 @@ -2781,7 +2773,6 @@ struct tg3 { u32 tg3_flags3; #define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 #define TG3_FLG3_ENABLE_APE 0x00000002 -#define TG3_FLG3_PROTECTED_NVRAM 0x00000004 #define TG3_FLG3_5701_DMA_BUG 0x00000008 #define TG3_FLG3_USE_PHYLIB 0x00000010 #define TG3_FLG3_MDIOBUS_INITED 0x00000020 @@ -2864,7 +2855,6 @@ struct tg3 { #define PHY_ID_BCM5756 0xbc050ed0 #define PHY_ID_BCM5784 0xbc050fa0 #define PHY_ID_BCM5761 0xbc050fd0 -#define PHY_ID_BCM5717 0x5c0d8a00 #define PHY_ID_BCM5906 0xdc00ac40 #define PHY_ID_BCM8002 0x60010140 #define PHY_ID_INVALID 0xffffffff @@ -2906,7 +2896,7 @@ struct tg3 { (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ - (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002) + (X) == PHY_ID_BCM8002) struct tg3_hw_stats *hw_stats; dma_addr_t stats_mapping; diff --git a/trunk/drivers/net/tokenring/3c359.c b/trunk/drivers/net/tokenring/3c359.c index cf552d1d9629..724158966ec1 100644 --- a/trunk/drivers/net/tokenring/3c359.c +++ b/trunk/drivers/net/tokenring/3c359.c @@ -610,8 +610,9 @@ static int xl_open(struct net_device *dev) u16 switchsettings, switchsettings_eeprom ; - if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev)) + if(request_irq(dev->irq, &xl_interrupt, IRQF_SHARED , "3c359", dev)) { return -EAGAIN; + } /* * Read the information from the EEPROM that we need. diff --git a/trunk/drivers/net/tokenring/olympic.c b/trunk/drivers/net/tokenring/olympic.c index df32025c5132..d9ec7f0bbd0a 100644 --- a/trunk/drivers/net/tokenring/olympic.c +++ b/trunk/drivers/net/tokenring/olympic.c @@ -445,9 +445,9 @@ static int olympic_open(struct net_device *dev) olympic_init(dev); - if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic", - dev)) + if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) { return -EAGAIN; + } #if OLYMPIC_DEBUG printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); diff --git a/trunk/drivers/net/typhoon.c b/trunk/drivers/net/typhoon.c index 4b7541024424..d6d345229fe9 100644 --- a/trunk/drivers/net/typhoon.c +++ b/trunk/drivers/net/typhoon.c @@ -2150,7 +2150,7 @@ typhoon_open(struct net_device *dev) goto out_sleep; } - err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED, + err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED, dev->name, dev); if(err < 0) goto out_sleep; diff --git a/trunk/drivers/net/via-rhine.c b/trunk/drivers/net/via-rhine.c index ec94ddf01f56..4535e89dfff1 100644 --- a/trunk/drivers/net/via-rhine.c +++ b/trunk/drivers/net/via-rhine.c @@ -1150,7 +1150,7 @@ static int rhine_open(struct net_device *dev) void __iomem *ioaddr = rp->base; int rc; - rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name, + rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name, dev); if (rc) return rc; diff --git a/trunk/drivers/net/via-velocity.c b/trunk/drivers/net/via-velocity.c index 1e6b395c555f..158f411bd555 100644 --- a/trunk/drivers/net/via-velocity.c +++ b/trunk/drivers/net/via-velocity.c @@ -2176,7 +2176,7 @@ static int velocity_open(struct net_device *dev) velocity_init_registers(vptr, VELOCITY_INIT_COLD); - ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, + ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED, dev->name, dev); if (ret < 0) { /* Power down the chip */ diff --git 
a/trunk/drivers/net/vmxnet3/vmxnet3_defs.h b/trunk/drivers/net/vmxnet3/vmxnet3_defs.h index b4889e6c4a57..dc8ee4438a4f 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_defs.h +++ b/trunk/drivers/net/vmxnet3/vmxnet3_defs.h @@ -90,60 +90,23 @@ enum { VMXNET3_CMD_GET_CONF_INTR }; -/* - * Little Endian layout of bitfields - - * Byte 0 : 7.....len.....0 - * Byte 1 : rsvd gen 13.len.8 - * Byte 2 : 5.msscof.0 ext1 dtype - * Byte 3 : 13...msscof...6 - * - * Big Endian layout of bitfields - - * Byte 0: 13...msscof...6 - * Byte 1 : 5.msscof.0 ext1 dtype - * Byte 2 : rsvd gen 13.len.8 - * Byte 3 : 7.....len.....0 - * - * Thus, le32_to_cpu on the dword will allow the big endian driver to read - * the bit fields correctly. And cpu_to_le32 will convert bitfields - * bit fields written by big endian driver to format required by device. - */ - struct Vmxnet3_TxDesc { - __le64 addr; - -#ifdef __BIG_ENDIAN_BITFIELD - u32 msscof:14; /* MSS, checksum offset, flags */ - u32 ext1:1; - u32 dtype:1; /* descriptor type */ - u32 rsvd:1; - u32 gen:1; /* generation bit */ - u32 len:14; -#else - u32 len:14; - u32 gen:1; /* generation bit */ - u32 rsvd:1; - u32 dtype:1; /* descriptor type */ - u32 ext1:1; - u32 msscof:14; /* MSS, checksum offset, flags */ -#endif /* __BIG_ENDIAN_BITFIELD */ - -#ifdef __BIG_ENDIAN_BITFIELD - u32 tci:16; /* Tag to Insert */ - u32 ti:1; /* VLAN Tag Insertion */ - u32 ext2:1; - u32 cq:1; /* completion request */ - u32 eop:1; /* End Of Packet */ - u32 om:2; /* offload mode */ - u32 hlen:10; /* header len */ -#else - u32 hlen:10; /* header len */ - u32 om:2; /* offload mode */ - u32 eop:1; /* End Of Packet */ - u32 cq:1; /* completion request */ - u32 ext2:1; - u32 ti:1; /* VLAN Tag Insertion */ - u32 tci:16; /* Tag to Insert */ -#endif /* __BIG_ENDIAN_BITFIELD */ + u64 addr; + + u32 len:14; + u32 gen:1; /* generation bit */ + u32 rsvd:1; + u32 dtype:1; /* descriptor type */ + u32 ext1:1; + u32 msscof:14; /* MSS, checksum offset, flags */ + + u32 hlen:10; /* header len */ + u32 om:2; /* offload mode */ + u32 eop:1; /* End Of Packet */ + u32 cq:1; /* completion request */ + u32 ext2:1; + u32 ti:1; /* VLAN Tag Insertion */ + u32 tci:16; /* Tag to Insert */ }; /* TxDesc.OM values */ @@ -155,8 +118,6 @@ struct Vmxnet3_TxDesc { #define VMXNET3_TXD_EOP_SHIFT 12 #define VMXNET3_TXD_CQ_SHIFT 13 #define VMXNET3_TXD_GEN_SHIFT 14 -#define VMXNET3_TXD_EOP_DWORD_SHIFT 3 -#define VMXNET3_TXD_GEN_DWORD_SHIFT 2 #define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) #define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) @@ -169,40 +130,29 @@ struct Vmxnet3_TxDataDesc { u8 data[VMXNET3_HDR_COPY_SIZE]; }; -#define VMXNET3_TCD_GEN_SHIFT 31 -#define VMXNET3_TCD_GEN_SIZE 1 -#define VMXNET3_TCD_TXIDX_SHIFT 0 -#define VMXNET3_TCD_TXIDX_SIZE 12 -#define VMXNET3_TCD_GEN_DWORD_SHIFT 3 struct Vmxnet3_TxCompDesc { u32 txdIdx:12; /* Index of the EOP TxDesc */ u32 ext1:20; - __le32 ext2; - __le32 ext3; + u32 ext2; + u32 ext3; u32 rsvd:24; u32 type:7; /* completion type */ u32 gen:1; /* generation bit */ }; + struct Vmxnet3_RxDesc { - __le64 addr; + u64 addr; -#ifdef __BIG_ENDIAN_BITFIELD - u32 gen:1; /* Generation bit */ - u32 rsvd:15; - u32 dtype:1; /* Descriptor type */ - u32 btype:1; /* Buffer Type */ - u32 len:14; -#else u32 len:14; u32 btype:1; /* Buffer Type */ u32 dtype:1; /* Descriptor type */ u32 rsvd:15; u32 gen:1; /* Generation bit */ -#endif + u32 ext1; }; @@ -214,17 +164,8 @@ struct Vmxnet3_RxDesc { #define VMXNET3_RXD_BTYPE_SHIFT 14 #define VMXNET3_RXD_GEN_SHIFT 31 + struct Vmxnet3_RxCompDesc { -#ifdef 
__BIG_ENDIAN_BITFIELD - u32 ext2:1; - u32 cnc:1; /* Checksum Not Calculated */ - u32 rssType:4; /* RSS hash type used */ - u32 rqID:10; /* rx queue/ring ID */ - u32 sop:1; /* Start of Packet */ - u32 eop:1; /* End of Packet */ - u32 ext1:2; - u32 rxdIdx:12; /* Index of the RxDesc */ -#else u32 rxdIdx:12; /* Index of the RxDesc */ u32 ext1:2; u32 eop:1; /* End of Packet */ @@ -233,36 +174,14 @@ struct Vmxnet3_RxCompDesc { u32 rssType:4; /* RSS hash type used */ u32 cnc:1; /* Checksum Not Calculated */ u32 ext2:1; -#endif /* __BIG_ENDIAN_BITFIELD */ - __le32 rssHash; /* RSS hash value */ + u32 rssHash; /* RSS hash value */ -#ifdef __BIG_ENDIAN_BITFIELD - u32 tci:16; /* Tag stripped */ - u32 ts:1; /* Tag is stripped */ - u32 err:1; /* Error */ - u32 len:14; /* data length */ -#else u32 len:14; /* data length */ u32 err:1; /* Error */ u32 ts:1; /* Tag is stripped */ u32 tci:16; /* Tag stripped */ -#endif /* __BIG_ENDIAN_BITFIELD */ - -#ifdef __BIG_ENDIAN_BITFIELD - u32 gen:1; /* generation bit */ - u32 type:7; /* completion type */ - u32 fcs:1; /* Frame CRC correct */ - u32 frg:1; /* IP Fragment */ - u32 v4:1; /* IPv4 */ - u32 v6:1; /* IPv6 */ - u32 ipc:1; /* IP Checksum Correct */ - u32 tcp:1; /* TCP packet */ - u32 udp:1; /* UDP packet */ - u32 tuc:1; /* TCP/UDP Checksum Correct */ - u32 csum:16; -#else u32 csum:16; u32 tuc:1; /* TCP/UDP Checksum Correct */ u32 udp:1; /* UDP packet */ @@ -274,7 +193,6 @@ struct Vmxnet3_RxCompDesc { u32 fcs:1; /* Frame CRC correct */ u32 type:7; /* completion type */ u32 gen:1; /* generation bit */ -#endif /* __BIG_ENDIAN_BITFIELD */ }; /* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ @@ -288,8 +206,6 @@ struct Vmxnet3_RxCompDesc { /* csum OK for TCP/UDP pkts over IP */ #define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \ 1 << VMXNET3_RCD_IPC_SHIFT) -#define VMXNET3_TXD_GEN_SIZE 1 -#define VMXNET3_TXD_EOP_SIZE 1 /* value of RxCompDesc.rssType */ enum { @@ -303,9 +219,9 @@ enum { /* a union for accessing all cmd/completion descriptors */ union Vmxnet3_GenericDesc { - __le64 qword[2]; - __le32 dword[4]; - __le16 word[8]; + u64 qword[2]; + u32 dword[4]; + u16 word[8]; struct Vmxnet3_TxDesc txd; struct Vmxnet3_RxDesc rxd; struct Vmxnet3_TxCompDesc tcd; @@ -371,24 +287,18 @@ enum { struct Vmxnet3_GOSInfo { -#ifdef __BIG_ENDIAN_BITFIELD - u32 gosMisc:10; /* other info about gos */ - u32 gosVer:16; /* gos version */ - u32 gosType:4; /* which guest */ - u32 gosBits:2; /* 32-bit or 64-bit? */ -#else - u32 gosBits:2; /* 32-bit or 64-bit? */ - u32 gosType:4; /* which guest */ - u32 gosVer:16; /* gos version */ - u32 gosMisc:10; /* other info about gos */ -#endif /* __BIG_ENDIAN_BITFIELD */ + u32 gosBits:2; /* 32-bit or 64-bit? */ + u32 gosType:4; /* which guest */ + u32 gosVer:16; /* gos version */ + u32 gosMisc:10; /* other info about gos */ }; + struct Vmxnet3_DriverInfo { - __le32 version; + u32 version; struct Vmxnet3_GOSInfo gos; - __le32 vmxnet3RevSpt; - __le32 uptVerSpt; + u32 vmxnet3RevSpt; + u32 uptVerSpt; }; @@ -405,42 +315,42 @@ struct Vmxnet3_DriverInfo { struct Vmxnet3_MiscConf { struct Vmxnet3_DriverInfo driverInfo; - __le64 uptFeatures; - __le64 ddPA; /* driver data PA */ - __le64 queueDescPA; /* queue descriptor table PA */ - __le32 ddLen; /* driver data len */ - __le32 queueDescLen; /* queue desc. table len in bytes */ - __le32 mtu; - __le16 maxNumRxSG; + u64 uptFeatures; + u64 ddPA; /* driver data PA */ + u64 queueDescPA; /* queue descriptor table PA */ + u32 ddLen; /* driver data len */ + u32 queueDescLen; /* queue desc. 
table len in bytes */ + u32 mtu; + u16 maxNumRxSG; u8 numTxQueues; u8 numRxQueues; - __le32 reserved[4]; + u32 reserved[4]; }; struct Vmxnet3_TxQueueConf { - __le64 txRingBasePA; - __le64 dataRingBasePA; - __le64 compRingBasePA; - __le64 ddPA; /* driver data */ - __le64 reserved; - __le32 txRingSize; /* # of tx desc */ - __le32 dataRingSize; /* # of data desc */ - __le32 compRingSize; /* # of comp desc */ - __le32 ddLen; /* size of driver data */ + u64 txRingBasePA; + u64 dataRingBasePA; + u64 compRingBasePA; + u64 ddPA; /* driver data */ + u64 reserved; + u32 txRingSize; /* # of tx desc */ + u32 dataRingSize; /* # of data desc */ + u32 compRingSize; /* # of comp desc */ + u32 ddLen; /* size of driver data */ u8 intrIdx; u8 _pad[7]; }; struct Vmxnet3_RxQueueConf { - __le64 rxRingBasePA[2]; - __le64 compRingBasePA; - __le64 ddPA; /* driver data */ - __le64 reserved; - __le32 rxRingSize[2]; /* # of rx desc */ - __le32 compRingSize; /* # of rx comp desc */ - __le32 ddLen; /* size of driver data */ + u64 rxRingBasePA[2]; + u64 compRingBasePA; + u64 ddPA; /* driver data */ + u64 reserved; + u32 rxRingSize[2]; /* # of rx desc */ + u32 compRingSize; /* # of rx comp desc */ + u32 ddLen; /* size of driver data */ u8 intrIdx; u8 _pad[7]; }; @@ -471,7 +381,7 @@ struct Vmxnet3_IntrConf { u8 eventIntrIdx; u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for * each intr */ - __le32 reserved[3]; + u32 reserved[3]; }; /* one bit per VLAN ID, the size is in the units of u32 */ @@ -481,21 +391,21 @@ struct Vmxnet3_IntrConf { struct Vmxnet3_QueueStatus { bool stopped; u8 _pad[3]; - __le32 error; + u32 error; }; struct Vmxnet3_TxQueueCtrl { - __le32 txNumDeferred; - __le32 txThreshold; - __le64 reserved; + u32 txNumDeferred; + u32 txThreshold; + u64 reserved; }; struct Vmxnet3_RxQueueCtrl { bool updateRxProd; u8 _pad[7]; - __le64 reserved; + u64 reserved; }; enum { @@ -507,11 +417,11 @@ enum { }; struct Vmxnet3_RxFilterConf { - __le32 rxMode; /* VMXNET3_RXM_xxx */ - __le16 mfTableLen; /* size of the multicast filter table */ - __le16 _pad1; - __le64 mfTablePA; /* PA of the multicast filters table */ - __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ + u32 rxMode; /* VMXNET3_RXM_xxx */ + u16 mfTableLen; /* size of the multicast filter table */ + u16 _pad1; + u64 mfTablePA; /* PA of the multicast filters table */ + u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ }; @@ -534,7 +444,7 @@ struct Vmxnet3_PM_PktFilter { struct Vmxnet3_PMConf { - __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ + u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ u8 numFilters; u8 pad[5]; struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; @@ -542,9 +452,9 @@ struct Vmxnet3_PMConf { struct Vmxnet3_VariableLenConfDesc { - __le32 confVer; - __le32 confLen; - __le64 confPA; + u32 confVer; + u32 confLen; + u64 confPA; }; @@ -581,12 +491,12 @@ struct Vmxnet3_DSDevRead { /* All structures in DriverShared are padded to multiples of 8 bytes */ struct Vmxnet3_DriverShared { - __le32 magic; + u32 magic; /* make devRead start at 64bit boundaries */ - __le32 pad; - struct Vmxnet3_DSDevRead devRead; - __le32 ecr; - __le32 reserved[5]; + u32 pad; + struct Vmxnet3_DSDevRead devRead; + u32 ecr; + u32 reserved[5]; }; diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c index a4c97e786ee5..004353a46af0 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c @@ -24,13 +24,12 @@ * */ -#include - #include "vmxnet3_int.h" char vmxnet3_driver_name[] = "vmxnet3"; 
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" + /* * PCI Device ID Table * Last entry must be all 0s @@ -152,10 +151,11 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter) } } + static void vmxnet3_process_events(struct vmxnet3_adapter *adapter) { - u32 events = le32_to_cpu(adapter->shared->ecr); + u32 events = adapter->shared->ecr; if (!events) return; @@ -173,7 +173,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) if (adapter->tqd_start->status.stopped) { printk(KERN_ERR "%s: tq error 0x%x\n", adapter->netdev->name, - le32_to_cpu(adapter->tqd_start->status.error)); + adapter->tqd_start->status.error); } if (adapter->rqd_start->status.stopped) { printk(KERN_ERR "%s: rq error 0x%x\n", @@ -185,106 +185,6 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) } } -#ifdef __BIG_ENDIAN_BITFIELD -/* - * The device expects the bitfields in shared structures to be written in - * little endian. When CPU is big endian, the following routines are used to - * correctly read and write into ABI. - * The general technique used here is : double word bitfields are defined in - * opposite order for big endian architecture. Then before reading them in - * driver the complete double word is translated using le32_to_cpu. Similarly - * After the driver writes into bitfields, cpu_to_le32 is used to translate the - * double words into required format. - * In order to avoid touching bits in shared structure more than once, temporary - * descriptors are used. These are passed as srcDesc to following functions. - */ -static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc, - struct Vmxnet3_RxDesc *dstDesc) -{ - u32 *src = (u32 *)srcDesc + 2; - u32 *dst = (u32 *)dstDesc + 2; - dstDesc->addr = le64_to_cpu(srcDesc->addr); - *dst = le32_to_cpu(*src); - dstDesc->ext1 = le32_to_cpu(srcDesc->ext1); -} - -static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc, - struct Vmxnet3_TxDesc *dstDesc) -{ - int i; - u32 *src = (u32 *)(srcDesc + 1); - u32 *dst = (u32 *)(dstDesc + 1); - - /* Working backwards so that the gen bit is set at the end. */ - for (i = 2; i > 0; i--) { - src--; - dst--; - *dst = cpu_to_le32(*src); - } -} - - -static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc, - struct Vmxnet3_RxCompDesc *dstDesc) -{ - int i = 0; - u32 *src = (u32 *)srcDesc; - u32 *dst = (u32 *)dstDesc; - for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) { - *dst = le32_to_cpu(*src); - src++; - dst++; - } -} - - -/* Used to read bitfield values from double words. 
*/ -static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size) -{ - u32 temp = le32_to_cpu(*bitfield); - u32 mask = ((1 << size) - 1) << pos; - temp &= mask; - temp >>= pos; - return temp; -} - - - -#endif /* __BIG_ENDIAN_BITFIELD */ - -#ifdef __BIG_ENDIAN_BITFIELD - -# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \ - txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \ - VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE) -# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \ - txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \ - VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE) -# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \ - VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \ - VMXNET3_TCD_GEN_SIZE) -# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \ - VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE) -# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \ - (dstrcd) = (tmp); \ - vmxnet3_RxCompToCPU((rcd), (tmp)); \ - } while (0) -# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \ - (dstrxd) = (tmp); \ - vmxnet3_RxDescToCPU((rxd), (tmp)); \ - } while (0) - -#else - -# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen) -# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop) -# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen) -# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx) -# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd) -# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd) - -#endif /* __BIG_ENDIAN_BITFIELD */ - static void vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, @@ -312,7 +212,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, /* no out of order completion */ BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); - BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); + BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); skb = tq->buf_info[eop_idx].skb; BUG_ON(skb == NULL); @@ -346,10 +246,9 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, union Vmxnet3_GenericDesc *gdesc; gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; - while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { - completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( - &gdesc->tcd), tq, adapter->pdev, - adapter); + while (gdesc->tcd.gen == tq->comp_ring.gen) { + completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, + adapter->pdev, adapter); vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; @@ -573,9 +472,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, } BUG_ON(rbi->dma_addr == 0); - gd->rxd.addr = cpu_to_le64(rbi->dma_addr); - gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT) - | val | rbi->len); + gd->rxd.addr = rbi->dma_addr; + gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | + rbi->len; num_allocated++; vmxnet3_cmd_ring_adv_next2fill(ring); @@ -632,10 +531,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, /* no need to map the buffer if headers are copied */ if (ctx->copy_size) { - ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + + ctx->sop_txd->txd.addr = tq->data_ring.basePA + tq->tx_ring.next2fill * - sizeof(struct Vmxnet3_TxDataDesc)); - ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); + sizeof(struct Vmxnet3_TxDataDesc); + ctx->sop_txd->dword[2] = dw2 | ctx->copy_size; ctx->sop_txd->dword[3] = 0; tbi = tq->buf_info + tq->tx_ring.next2fill; @@ -643,8 +542,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct 
vmxnet3_tx_ctx *ctx, dev_dbg(&adapter->netdev->dev, "txd[%u]: 0x%Lx 0x%x 0x%x\n", - tq->tx_ring.next2fill, - le64_to_cpu(ctx->sop_txd->txd.addr), + tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); @@ -672,14 +570,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); - gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); - gdesc->dword[2] = cpu_to_le32(dw2 | buf_size); + gdesc->txd.addr = tbi->dma_addr; + gdesc->dword[2] = dw2 | buf_size; gdesc->dword[3] = 0; dev_dbg(&adapter->netdev->dev, "txd[%u]: 0x%Lx 0x%x 0x%x\n", - tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), - le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); + tq->tx_ring.next2fill, gdesc->txd.addr, + gdesc->dword[2], gdesc->dword[3]); vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; @@ -701,14 +599,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); - gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); - gdesc->dword[2] = cpu_to_le32(dw2 | frag->size); + gdesc->txd.addr = tbi->dma_addr; + gdesc->dword[2] = dw2 | frag->size; gdesc->dword[3] = 0; dev_dbg(&adapter->netdev->dev, "txd[%u]: 0x%llu %u %u\n", - tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), - le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); + tq->tx_ring.next2fill, gdesc->txd.addr, + gdesc->dword[2], gdesc->dword[3]); vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; } @@ -853,10 +751,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, unsigned long flags; struct vmxnet3_tx_ctx ctx; union Vmxnet3_GenericDesc *gdesc; -#ifdef __BIG_ENDIAN_BITFIELD - /* Use temporary descriptor to avoid touching bits multiple times */ - union Vmxnet3_GenericDesc tempTxDesc; -#endif /* conservatively estimate # of descriptors to use */ count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + @@ -933,22 +827,16 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); /* setup the EOP desc */ - ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); + ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; /* setup the SOP desc */ -#ifdef __BIG_ENDIAN_BITFIELD - gdesc = &tempTxDesc; - gdesc->dword[2] = ctx.sop_txd->dword[2]; - gdesc->dword[3] = ctx.sop_txd->dword[3]; -#else gdesc = ctx.sop_txd; -#endif if (ctx.mss) { gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; gdesc->txd.om = VMXNET3_OM_TSO; gdesc->txd.msscof = ctx.mss; - le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss); + tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + + ctx.mss - 1) / ctx.mss; } else { if (skb->ip_summed == CHECKSUM_PARTIAL) { gdesc->txd.hlen = ctx.eth_ip_hdr_size; @@ -959,7 +847,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, gdesc->txd.om = 0; gdesc->txd.msscof = 0; } - le32_add_cpu(&tq->shared->txNumDeferred, 1); + tq->shared->txNumDeferred++; } if (vlan_tx_tag_present(skb)) { @@ -967,27 +855,19 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, gdesc->txd.tci = vlan_tx_tag_get(skb); } - /* finally flips the GEN bit of the SOP desc. 
*/ - gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ - VMXNET3_TXD_GEN); -#ifdef __BIG_ENDIAN_BITFIELD - /* Finished updating in bitfields of Tx Desc, so write them in original - * place. - */ - vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc, - (struct Vmxnet3_TxDesc *)ctx.sop_txd); - gdesc = ctx.sop_txd; -#endif + wmb(); + + /* finally flips the GEN bit of the SOP desc */ + gdesc->dword[2] ^= VMXNET3_TXD_GEN; dev_dbg(&adapter->netdev->dev, "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - - tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), - le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3])); + tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], + gdesc->dword[3]); spin_unlock_irqrestore(&tq->tx_lock, flags); - if (le32_to_cpu(tq->shared->txNumDeferred) >= - le32_to_cpu(tq->shared->txThreshold)) { + if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { tq->shared->txNumDeferred = 0; VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, tq->tx_ring.next2fill); @@ -1009,8 +889,9 @@ static netdev_tx_t vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct vmxnet3_tx_queue *tq = &adapter->tx_queue; - return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev); + return vmxnet3_tq_xmit(skb, tq, adapter, netdev); } @@ -1021,7 +902,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, { if (!gdesc->rcd.cnc && adapter->rxcsum) { /* typical case: TCP/UDP over IP and both csums are correct */ - if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == + if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) { skb->ip_summed = CHECKSUM_UNNECESSARY; BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); @@ -1076,12 +957,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, u32 num_rxd = 0; struct Vmxnet3_RxCompDesc *rcd; struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; -#ifdef __BIG_ENDIAN_BITFIELD - struct Vmxnet3_RxDesc rxCmdDesc; - struct Vmxnet3_RxCompDesc rxComp; -#endif - vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, - &rxComp); + + rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; while (rcd->gen == rq->comp_ring.gen) { struct vmxnet3_rx_buf_info *rbi; struct sk_buff *skb; @@ -1099,12 +976,11 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, idx = rcd->rxdIdx; ring_idx = rcd->rqID == rq->qid ? 
0 : 1; - vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, - &rxCmdDesc); + + rxd = &rq->rx_ring[ring_idx].base[idx].rxd; rbi = rq->buf_info[ring_idx] + idx; - BUG_ON(rxd->addr != rbi->dma_addr || - rxd->len != rbi->len); + BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); if (unlikely(rcd->eop && rcd->err)) { vmxnet3_rx_error(rq, rcd, ctx, adapter); @@ -1202,8 +1078,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, } vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); - vmxnet3_getRxComp(rcd, - &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); + rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; } return num_rxd; @@ -1219,11 +1094,7 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, for (ring_idx = 0; ring_idx < 2; ring_idx++) { for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { -#ifdef __BIG_ENDIAN_BITFIELD - struct Vmxnet3_RxDesc rxDesc; -#endif - vmxnet3_getRxDesc(rxd, - &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc); + rxd = &rq->rx_ring[ring_idx].base[i].rxd; if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && rq->buf_info[ring_idx][i].skb) { @@ -1475,12 +1346,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) err = request_irq(adapter->intr.msix_entries[0].vector, vmxnet3_intr, 0, adapter->netdev->name, adapter->netdev); - } else if (adapter->intr.type == VMXNET3_IT_MSI) { - err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, - adapter->netdev->name, adapter->netdev); } else #endif - { + if (adapter->intr.type == VMXNET3_IT_MSI) { + err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, + adapter->netdev->name, adapter->netdev); + } else { err = request_irq(adapter->pdev->irq, vmxnet3_intr, IRQF_SHARED, adapter->netdev->name, adapter->netdev); @@ -1541,22 +1412,6 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) } -inline void set_flag_le16(__le16 *data, u16 flag) -{ - *data = cpu_to_le16(le16_to_cpu(*data) | flag); -} - -inline void set_flag_le64(__le64 *data, u64 flag) -{ - *data = cpu_to_le64(le64_to_cpu(*data) | flag); -} - -inline void reset_flag_le64(__le64 *data, u64 flag) -{ - *data = cpu_to_le64(le64_to_cpu(*data) & ~flag); -} - - static void vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { @@ -1572,8 +1427,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) adapter->vlan_grp = grp; /* update FEATURES to device */ - set_flag_le64(&devRead->misc.uptFeatures, - UPT1_F_RXVLAN); + devRead->misc.uptFeatures |= UPT1_F_RXVLAN; VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); /* @@ -1596,7 +1450,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) struct Vmxnet3_DSDevRead *devRead = &shared->devRead; adapter->vlan_grp = NULL; - if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) { + if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { int i; for (i = 0; i < VMXNET3_VFT_SIZE; i++) { @@ -1609,8 +1463,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) VMXNET3_CMD_UPDATE_VLAN_FILTERS); /* update FEATURES to device */ - reset_flag_le64(&devRead->misc.uptFeatures, - UPT1_F_RXVLAN); + devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); } @@ -1712,10 +1565,9 @@ vmxnet3_set_mc(struct net_device *netdev) new_table = vmxnet3_copy_mc(netdev); if (new_table) { new_mode |= VMXNET3_RXM_MCAST; - rxConf->mfTableLen = cpu_to_le16( - netdev->mc_count * ETH_ALEN); - rxConf->mfTablePA = cpu_to_le64(virt_to_phys( - new_table)); + 
rxConf->mfTableLen = netdev->mc_count * + ETH_ALEN; + rxConf->mfTablePA = virt_to_phys(new_table); } else { printk(KERN_INFO "%s: failed to copy mcast list" ", setting ALL_MULTI\n", netdev->name); @@ -1730,7 +1582,7 @@ vmxnet3_set_mc(struct net_device *netdev) } if (new_mode != rxConf->rxMode) { - rxConf->rxMode = cpu_to_le32(new_mode); + rxConf->rxMode = new_mode; VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE); } @@ -1758,69 +1610,63 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) memset(shared, 0, sizeof(*shared)); /* driver settings */ - shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC); - devRead->misc.driverInfo.version = cpu_to_le32( - VMXNET3_DRIVER_VERSION_NUM); + shared->magic = VMXNET3_REV1_MAGIC; + devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; - *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32( - *((u32 *)&devRead->misc.driverInfo.gos)); - devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); - devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); + devRead->misc.driverInfo.vmxnet3RevSpt = 1; + devRead->misc.driverInfo.uptVerSpt = 1; - devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); - devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); + devRead->misc.ddPA = virt_to_phys(adapter); + devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); /* set up feature flags */ if (adapter->rxcsum) - set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM); + devRead->misc.uptFeatures |= UPT1_F_RXCSUM; if (adapter->lro) { - set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO); - devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); + devRead->misc.uptFeatures |= UPT1_F_LRO; + devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; } if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && adapter->vlan_grp) { - set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN); + devRead->misc.uptFeatures |= UPT1_F_RXVLAN; } - devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); - devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); - devRead->misc.queueDescLen = cpu_to_le32( - sizeof(struct Vmxnet3_TxQueueDesc) + - sizeof(struct Vmxnet3_RxQueueDesc)); + devRead->misc.mtu = adapter->netdev->mtu; + devRead->misc.queueDescPA = adapter->queue_desc_pa; + devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc); /* tx queue settings */ BUG_ON(adapter->tx_queue.tx_ring.base == NULL); devRead->misc.numTxQueues = 1; tqc = &adapter->tqd_start->conf; - tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA); - tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA); - tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA); - tqc->ddPA = cpu_to_le64(virt_to_phys( - adapter->tx_queue.buf_info)); - tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size); - tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size); - tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size); - tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) * - tqc->txRingSize); + tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; + tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; + tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; + tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); + tqc->txRingSize = adapter->tx_queue.tx_ring.size; + 
tqc->dataRingSize = adapter->tx_queue.data_ring.size; + tqc->compRingSize = adapter->tx_queue.comp_ring.size; + tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * + tqc->txRingSize; tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; /* rx queue settings */ devRead->misc.numRxQueues = 1; rqc = &adapter->rqd_start->conf; - rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA); - rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA); - rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA); - rqc->ddPA = cpu_to_le64(virt_to_phys( - adapter->rx_queue.buf_info)); - rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size); - rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size); - rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size); - rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) * - (rqc->rxRingSize[0] + rqc->rxRingSize[1])); + rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; + rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; + rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; + rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); + rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; + rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; + rqc->compRingSize = adapter->rx_queue.comp_ring.size; + rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * + (rqc->rxRingSize[0] + rqc->rxRingSize[1]); rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; /* intr settings */ @@ -1869,10 +1715,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) vmxnet3_setup_driver_shared(adapter); - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO( - adapter->shared_pa)); - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( - adapter->shared_pa)); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, + VMXNET3_GET_ADDR_LO(adapter->shared_pa)); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, + VMXNET3_GET_ADDR_HI(adapter->shared_pa)); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV); ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); @@ -2578,7 +2425,7 @@ vmxnet3_suspend(struct device *device) memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ - set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER); + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; i++; } @@ -2620,21 +2467,19 @@ vmxnet3_suspend(struct device *device) pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ in_dev_put(in_dev); - set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER); + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; i++; } skip_arp: if (adapter->wol & WAKE_MAGIC) - set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC); + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; pmConf->numFilters = i; - adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); - adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( - *pmConf)); - adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( - pmConf)); + adapter->shared->devRead.pmConfDesc.confVer = 1; + adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); + adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_PMCFG); @@ -2665,11 +2510,9 @@ vmxnet3_resume(struct device *device) pmConf = adapter->pm_conf; memset(pmConf, 0, sizeof(*pmConf)); - 
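/*
 * For reference on the VMXNET3_{READ,WRITE}_BAR*_REG changes in
 * vmxnet3_int.h further below: readl()/writel() already perform the
 * CPU <-> little-endian conversion needed for PCI MMIO, so a register
 * wrapper can hand them CPU-order values directly and behave the same on
 * big- and little-endian hosts.  A minimal sketch of that convention
 * (bar_write/bar_read are illustrative helpers, not driver code):
 */
#include <linux/io.h>
#include <linux/types.h>

static inline void bar_write(void __iomem *bar, unsigned long reg, u32 val)
{
	/* val is CPU byte order; writel() stores it little-endian on the bus */
	writel(val, bar + reg);
}

static inline u32 bar_read(void __iomem *bar, unsigned long reg)
{
	/* readl() returns the little-endian bus value converted to CPU order */
	return readl(bar + reg);
}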
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); - adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( - *pmConf)); - adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys( - pmConf)); + adapter->shared->devRead.pmConfDesc.confVer = 1; + adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); + adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); netif_device_attach(netdev); pci_set_power_state(pdev, PCI_D0); diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c b/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c index 3935c4493fb7..c2c15e4cafc7 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) adapter->rxcsum = val; if (netif_running(netdev)) { if (val) - set_flag_le64( - &adapter->shared->devRead.misc.uptFeatures, - UPT1_F_RXCSUM); + adapter->shared->devRead.misc.uptFeatures |= + UPT1_F_RXCSUM; else - reset_flag_le64( - &adapter->shared->devRead.misc.uptFeatures, - UPT1_F_RXCSUM); + adapter->shared->devRead.misc.uptFeatures &= + ~UPT1_F_RXCSUM; VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_int.h b/trunk/drivers/net/vmxnet3/vmxnet3_int.h index 34f392f46fb1..445081686d5d 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_int.h +++ b/trunk/drivers/net/vmxnet3/vmxnet3_int.h @@ -330,14 +330,14 @@ struct vmxnet3_adapter { }; #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ - writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg)) + writel((val), (adapter)->hw_addr0 + (reg)) #define VMXNET3_READ_BAR0_REG(adapter, reg) \ - le32_to_cpu(readl((adapter)->hw_addr0 + (reg))) + readl((adapter)->hw_addr0 + (reg)) #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ - writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg)) + writel((val), (adapter)->hw_addr1 + (reg)) #define VMXNET3_READ_BAR1_REG(adapter, reg) \ - le32_to_cpu(readl((adapter)->hw_addr1 + (reg))) + readl((adapter)->hw_addr1 + (reg)) #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ @@ -353,10 +353,6 @@ struct vmxnet3_adapter { #define VMXNET3_MAX_ETH_HDR_SIZE 22 #define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) -void set_flag_le16(__le16 *data, u16 flag); -void set_flag_le64(__le64 *data, u64 flag); -void reset_flag_le64(__le64 *data, u64 flag); - int vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); diff --git a/trunk/drivers/net/wan/dscc4.c b/trunk/drivers/net/wan/dscc4.c index 63a010252a37..81c8aec9df92 100644 --- a/trunk/drivers/net/wan/dscc4.c +++ b/trunk/drivers/net/wan/dscc4.c @@ -1127,7 +1127,7 @@ static int dscc4_open(struct net_device *dev) init_timer(&dpriv->timer); dpriv->timer.expires = jiffies + 10*HZ; dpriv->timer.data = (unsigned long)dev; - dpriv->timer.function = dscc4_timer; + dpriv->timer.function = &dscc4_timer; add_timer(&dpriv->timer); netif_carrier_on(dev); diff --git a/trunk/drivers/net/wireless/adm8211.c b/trunk/drivers/net/wireless/adm8211.c index 39410016b4ff..b80f514877d8 100644 --- a/trunk/drivers/net/wireless/adm8211.c +++ b/trunk/drivers/net/wireless/adm8211.c @@ -1538,7 +1538,7 @@ static int adm8211_start(struct ieee80211_hw *dev) adm8211_hw_init(dev); adm8211_rf_set_channel(dev, priv->channel); - retval = request_irq(priv->pdev->irq, adm8211_interrupt, + retval = request_irq(priv->pdev->irq, &adm8211_interrupt, IRQF_SHARED, "adm8211", dev); if (retval) { printk(KERN_ERR "%s: failed to register 
IRQ handler\n", diff --git a/trunk/drivers/net/wireless/b43/dma.c b/trunk/drivers/net/wireless/b43/dma.c index de4e804bedf0..b5cd7f57055b 100644 --- a/trunk/drivers/net/wireless/b43/dma.c +++ b/trunk/drivers/net/wireless/b43/dma.c @@ -1157,18 +1157,17 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot) } static int dma_tx_fragment(struct b43_dmaring *ring, - struct sk_buff **in_skb) + struct sk_buff *skb) { - struct sk_buff *skb = *in_skb; const struct b43_dma_ops *ops = ring->ops; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info); u8 *header; int slot, old_top_slot, old_used_slots; int err; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; struct b43_dmadesc_meta *meta_hdr; - struct sk_buff *bounce_skb; u16 cookie; size_t hdrsize = b43_txhdr_size(ring->dev); @@ -1212,34 +1211,28 @@ static int dma_tx_fragment(struct b43_dmaring *ring, meta->skb = skb; meta->is_last_fragment = 1; + priv_info->bouncebuffer = NULL; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. */ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { - bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); - if (!bounce_skb) { + priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA); + if (!priv_info->bouncebuffer) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -ENOMEM; goto out_unmap_hdr; } + memcpy(priv_info->bouncebuffer, skb->data, skb->len); - memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); - memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb)); - bounce_skb->dev = skb->dev; - skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb)); - info = IEEE80211_SKB_CB(bounce_skb); - - dev_kfree_skb_any(skb); - skb = bounce_skb; - *in_skb = bounce_skb; - meta->skb = skb; - meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1); if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { + kfree(priv_info->bouncebuffer); + priv_info->bouncebuffer = NULL; ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -EIO; - goto out_free_bounce; + goto out_unmap_hdr; } } @@ -1256,8 +1249,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring, ops->poke_tx(ring, next_slot(ring, slot)); return 0; -out_free_bounce: - dev_kfree_skb_any(skb); out_unmap_hdr: unmap_descbuffer(ring, meta_hdr->dmaaddr, hdrsize, 1); @@ -1362,11 +1353,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) * static, so we don't need to store it per frame. */ ring->queue_prio = skb_get_queue_mapping(skb); - /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing - * into the skb data or cb now. */ - hdr = NULL; - info = NULL; - err = dma_tx_fragment(ring, &skb); + err = dma_tx_fragment(ring, skb); if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. 
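/*
 * For reference, a condensed sketch of the bounce-buffer fallback that the
 * surrounding b43/dma.c changes implement (mapped in dma_tx_fragment() above,
 * released again in b43_dma_handle_txstatus() below).  It is written against
 * the generic DMA API purely for illustration; "struct txbuf" and the helper
 * names are assumptions, not b43 structures:
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct txbuf {
	void		*bounce;	/* NULL when the frame was mapped directly */
	dma_addr_t	dmaaddr;
};

static int txbuf_map(struct device *dev, struct txbuf *tb, void *data, size_t len)
{
	tb->bounce = NULL;
	tb->dmaaddr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, tb->dmaaddr))
		return 0;

	/* Mapping failed (buffer not DMA-able for this device): copy the
	 * frame into a low-memory bounce buffer and map that instead. */
	tb->bounce = kmalloc(len, GFP_ATOMIC | GFP_DMA);
	if (!tb->bounce)
		return -ENOMEM;
	memcpy(tb->bounce, data, len);
	tb->dmaaddr = dma_map_single(dev, tb->bounce, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, tb->dmaaddr)) {
		kfree(tb->bounce);
		tb->bounce = NULL;
		return -EIO;
	}
	return 0;
}

static void txbuf_complete(struct device *dev, struct txbuf *tb, size_t len)
{
	/* TX completion: unmap, then drop the bounce copy if one was used. */
	dma_unmap_single(dev, tb->dmaaddr, len, DMA_TO_DEVICE);
	kfree(tb->bounce);		/* kfree(NULL) is a no-op */
	tb->bounce = NULL;
}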
*/ @@ -1413,12 +1400,17 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); desc = ops->idx2desc(ring, slot, &meta); - if (meta->skb) - unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, - 1); - else + if (meta->skb) { + struct b43_private_tx_info *priv_info = + b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); + + unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); + kfree(priv_info->bouncebuffer); + priv_info->bouncebuffer = NULL; + } else { unmap_descbuffer(ring, meta->dmaaddr, b43_txhdr_size(dev), 1); + } if (meta->is_last_fragment) { struct ieee80211_tx_info *info; diff --git a/trunk/drivers/net/wireless/b43/xmit.h b/trunk/drivers/net/wireless/b43/xmit.h index 3530de871873..d23ff9fe0c9e 100644 --- a/trunk/drivers/net/wireless/b43/xmit.h +++ b/trunk/drivers/net/wireless/b43/xmit.h @@ -2,6 +2,8 @@ #define B43_XMIT_H_ #include "main.h" +#include + #define _b43_declare_plcp_hdr(size) \ struct b43_plcp_hdr##size { \ @@ -332,4 +334,21 @@ static inline u8 b43_kidx_to_raw(struct b43_wldev *dev, u8 firmware_kidx) return raw_kidx; } +/* struct b43_private_tx_info - TX info private to b43. + * The structure is placed in (struct ieee80211_tx_info *)->rate_driver_data + * + * @bouncebuffer: DMA Bouncebuffer (if used) + */ +struct b43_private_tx_info { + void *bouncebuffer; +}; + +static inline struct b43_private_tx_info * +b43_get_priv_tx_info(struct ieee80211_tx_info *info) +{ + BUILD_BUG_ON(sizeof(struct b43_private_tx_info) > + sizeof(info->rate_driver_data)); + return (struct b43_private_tx_info *)info->rate_driver_data; +} + #endif /* B43_XMIT_H_ */ diff --git a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c index 6e2fc0cb6f8a..240cff1e6979 100644 --- a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c @@ -6029,7 +6029,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, struct ipw2100_priv *priv; struct net_device *dev; - dev = alloc_ieee80211(sizeof(struct ipw2100_priv)); + dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0); if (!dev) return NULL; priv = libipw_priv(dev); @@ -6342,7 +6342,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group); - free_ieee80211(dev); + free_ieee80211(dev, 0); pci_set_drvdata(pci_dev, NULL); } @@ -6400,7 +6400,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) if (dev->base_addr) iounmap((void __iomem *)dev->base_addr); - free_ieee80211(dev); + free_ieee80211(dev, 0); } pci_release_regions(pci_dev); diff --git a/trunk/drivers/net/wireless/ipw2x00/ipw2200.c b/trunk/drivers/net/wireless/ipw2x00/ipw2200.c index 5c6ff58732d5..61ef8904af97 100644 --- a/trunk/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/trunk/drivers/net/wireless/ipw2x00/ipw2200.c @@ -103,6 +103,25 @@ static int antenna = CFG_SYS_ANTENNA_BOTH; static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ #endif +static struct ieee80211_rate ipw2200_rates[] = { + { .bitrate = 10 }, + { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60 }, + { .bitrate = 90 }, + { .bitrate = 120 }, + { .bitrate = 180 }, + { .bitrate = 240 }, + { .bitrate = 360 }, + { .bitrate = 480 }, + { .bitrate = 540 } +}; + +#define ipw2200_a_rates (ipw2200_rates + 4) +#define ipw2200_num_a_rates 8 
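/*
 * Note on the ipw2200_rates[] table above: struct ieee80211_rate.bitrate is
 * given in units of 100 kbit/s, so the first four entries are the 1, 2, 5.5
 * and 11 Mbit/s CCK/DSSS rates and the rest are the 6..54 Mbit/s OFDM rates.
 * That is why the 5 GHz alias skips four entries (ipw2200_rates + 4, 8 rates)
 * while the 2.4 GHz alias just below covers all 12.  A minimal example entry
 * (example_rate is illustrative, not part of the driver):
 */
#include <net/cfg80211.h>

static const struct ieee80211_rate example_rate = {
	.bitrate	= 55,				/* 100 kbit/s units: 5.5 Mbit/s */
	.flags		= IEEE80211_RATE_SHORT_PREAMBLE,/* short preamble allowed */
};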
+#define ipw2200_bg_rates (ipw2200_rates + 0) +#define ipw2200_num_bg_rates 12 #ifdef CONFIG_IPW2200_QOS static int qos_enable = 0; @@ -8654,24 +8673,6 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option) * */ -static int ipw_wx_get_name(struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, char *extra) -{ - struct ipw_priv *priv = libipw_priv(dev); - mutex_lock(&priv->mutex); - if (priv->status & STATUS_RF_KILL_MASK) - strcpy(wrqu->name, "radio off"); - else if (!(priv->status & STATUS_ASSOCIATED)) - strcpy(wrqu->name, "unassociated"); - else - snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", - ipw_modes[priv->assoc_request.ieee_mode]); - IPW_DEBUG_WX("Name: %s\n", wrqu->name); - mutex_unlock(&priv->mutex); - return 0; -} - static int ipw_set_channel(struct ipw_priv *priv, u8 channel) { if (channel == 0) { @@ -9971,7 +9972,7 @@ static int ipw_wx_sw_reset(struct net_device *dev, /* Rebase the WE IOCTLs to zero for the handler array */ #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] static iw_handler ipw_wx_handlers[] = { - IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name, + IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname, IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, @@ -11416,16 +11417,100 @@ static void ipw_bg_down(struct work_struct *work) /* Called by register_netdev() */ static int ipw_net_init(struct net_device *dev) { + int i, rc = 0; struct ipw_priv *priv = libipw_priv(dev); + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + struct wireless_dev *wdev = &priv->ieee->wdev; mutex_lock(&priv->mutex); if (ipw_up(priv)) { - mutex_unlock(&priv->mutex); - return -EIO; + rc = -EIO; + goto out; } + memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); + + /* fill-out priv->ieee->bg_band */ + if (geo->bg_channels) { + struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; + + bg_band->band = IEEE80211_BAND_2GHZ; + bg_band->n_channels = geo->bg_channels; + bg_band->channels = + kzalloc(geo->bg_channels * + sizeof(struct ieee80211_channel), GFP_KERNEL); + /* translate geo->bg to bg_band.channels */ + for (i = 0; i < geo->bg_channels; i++) { + bg_band->channels[i].band = IEEE80211_BAND_2GHZ; + bg_band->channels[i].center_freq = geo->bg[i].freq; + bg_band->channels[i].hw_value = geo->bg[i].channel; + bg_band->channels[i].max_power = geo->bg[i].max_power; + if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) + bg_band->channels[i].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS) + bg_band->channels[i].flags |= + IEEE80211_CHAN_NO_IBSS; + if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT) + bg_band->channels[i].flags |= + IEEE80211_CHAN_RADAR; + /* No equivalent for LIBIPW_CH_80211H_RULES, + LIBIPW_CH_UNIFORM_SPREADING, or + LIBIPW_CH_B_ONLY... 
*/ + } + /* point at bitrate info */ + bg_band->bitrates = ipw2200_bg_rates; + bg_band->n_bitrates = ipw2200_num_bg_rates; + + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; + } + + /* fill-out priv->ieee->a_band */ + if (geo->a_channels) { + struct ieee80211_supported_band *a_band = &priv->ieee->a_band; + + a_band->band = IEEE80211_BAND_5GHZ; + a_band->n_channels = geo->a_channels; + a_band->channels = + kzalloc(geo->a_channels * + sizeof(struct ieee80211_channel), GFP_KERNEL); + /* translate geo->bg to a_band.channels */ + for (i = 0; i < geo->a_channels; i++) { + a_band->channels[i].band = IEEE80211_BAND_2GHZ; + a_band->channels[i].center_freq = geo->a[i].freq; + a_band->channels[i].hw_value = geo->a[i].channel; + a_band->channels[i].max_power = geo->a[i].max_power; + if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) + a_band->channels[i].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + if (geo->a[i].flags & LIBIPW_CH_NO_IBSS) + a_band->channels[i].flags |= + IEEE80211_CHAN_NO_IBSS; + if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT) + a_band->channels[i].flags |= + IEEE80211_CHAN_RADAR; + /* No equivalent for LIBIPW_CH_80211H_RULES, + LIBIPW_CH_UNIFORM_SPREADING, or + LIBIPW_CH_B_ONLY... */ + } + /* point at bitrate info */ + a_band->bitrates = ipw2200_a_rates; + a_band->n_bitrates = ipw2200_num_a_rates; + + wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; + } + + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); + + /* With that information in place, we can now register the wiphy... */ + if (wiphy_register(wdev->wiphy)) { + rc = -EIO; + goto out; + } + +out: mutex_unlock(&priv->mutex); - return 0; + return rc; } /* PCI driver stuff */ @@ -11556,7 +11641,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv) if (priv->prom_net_dev) return -EPERM; - priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv)); + priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1); if (priv->prom_net_dev == NULL) return -ENOMEM; @@ -11575,7 +11660,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv) rc = register_netdev(priv->prom_net_dev); if (rc) { - free_ieee80211(priv->prom_net_dev); + free_ieee80211(priv->prom_net_dev, 1); priv->prom_net_dev = NULL; return rc; } @@ -11589,7 +11674,7 @@ static void ipw_prom_free(struct ipw_priv *priv) return; unregister_netdev(priv->prom_net_dev); - free_ieee80211(priv->prom_net_dev); + free_ieee80211(priv->prom_net_dev, 1); priv->prom_net_dev = NULL; } @@ -11617,7 +11702,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, struct ipw_priv *priv; int i; - net_dev = alloc_ieee80211(sizeof(struct ipw_priv)); + net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0); if (net_dev == NULL) { err = -ENOMEM; goto out; @@ -11765,7 +11850,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); out_free_ieee80211: - free_ieee80211(priv->net_dev); + free_ieee80211(priv->net_dev, 0); out: return err; } @@ -11832,7 +11917,7 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); - free_ieee80211(priv->net_dev); + free_ieee80211(priv->net_dev, 0); free_firmware(); } diff --git a/trunk/drivers/net/wireless/ipw2x00/libipw.h b/trunk/drivers/net/wireless/ipw2x00/libipw.h index 1e334ff6bd52..bf45391172f3 100644 --- a/trunk/drivers/net/wireless/ipw2x00/libipw.h +++ b/trunk/drivers/net/wireless/ipw2x00/libipw.h @@ -31,6 +31,7 @@ #include #include +#include #define LIBIPW_VERSION "git-1.1.13" @@ -783,12 +784,15 @@ struct 
libipw_geo { struct libipw_device { struct net_device *dev; + struct wireless_dev wdev; struct libipw_security sec; /* Bookkeeping structures */ struct libipw_stats ieee_stats; struct libipw_geo geo; + struct ieee80211_supported_band bg_band; + struct ieee80211_supported_band a_band; /* Probe / Beacon management */ struct list_head network_free_list; @@ -1014,8 +1018,8 @@ static inline int libipw_is_cck_rate(u8 rate) } /* ieee80211.c */ -extern void free_ieee80211(struct net_device *dev); -extern struct net_device *alloc_ieee80211(int sizeof_priv); +extern void free_ieee80211(struct net_device *dev, int monitor); +extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); extern int libipw_change_mtu(struct net_device *dev, int new_mtu); extern void libipw_networks_age(struct libipw_device *ieee, diff --git a/trunk/drivers/net/wireless/ipw2x00/libipw_module.c b/trunk/drivers/net/wireless/ipw2x00/libipw_module.c index eb2b60834c17..a0e9f6aed7da 100644 --- a/trunk/drivers/net/wireless/ipw2x00/libipw_module.c +++ b/trunk/drivers/net/wireless/ipw2x00/libipw_module.c @@ -62,6 +62,9 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); +struct cfg80211_ops libipw_config_ops = { }; +void *libipw_wiphy_privid = &libipw_wiphy_privid; + static int libipw_networks_allocate(struct libipw_device *ieee) { if (ieee->networks) @@ -140,7 +143,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu) } EXPORT_SYMBOL(libipw_change_mtu); -struct net_device *alloc_ieee80211(int sizeof_priv) +struct net_device *alloc_ieee80211(int sizeof_priv, int monitor) { struct libipw_device *ieee; struct net_device *dev; @@ -157,10 +160,31 @@ struct net_device *alloc_ieee80211(int sizeof_priv) ieee->dev = dev; + if (!monitor) { + ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0); + if (!ieee->wdev.wiphy) { + LIBIPW_ERROR("Unable to allocate wiphy.\n"); + goto failed_free_netdev; + } + + ieee->dev->ieee80211_ptr = &ieee->wdev; + ieee->wdev.iftype = NL80211_IFTYPE_STATION; + + /* Fill-out wiphy structure bits we know... Not enough info + here to call set_wiphy_dev or set MAC address or channel info + -- have to do that in ->ndo_init... 
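/*
 * The libipw/ipw2200 changes above split cfg80211 setup in two stages:
 * alloc_ieee80211() only allocates the wiphy with wiphy_new(), while the
 * bands, parent device and wiphy_register() call happen later (in
 * ipw_net_init()) once the geography data is known.  A minimal sketch of
 * that lifecycle; dummy_ops, example_*() and the trimmed error handling are
 * assumptions for illustration, not libipw code:
 */
#include <linux/device.h>
#include <net/cfg80211.h>

static struct cfg80211_ops dummy_ops;	/* placeholder, like the empty libipw_config_ops */

static struct wiphy *example_wiphy_bringup(struct device *parent,
					   struct ieee80211_supported_band *bg_band)
{
	struct wiphy *wiphy = wiphy_new(&dummy_ops, 0);	/* allocation only */

	if (!wiphy)
		return NULL;

	/* Bands and parent device must be filled in before registration. */
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
	set_wiphy_dev(wiphy, parent);

	if (wiphy_register(wiphy)) {	/* now visible to nl80211/userspace */
		wiphy_free(wiphy);
		return NULL;
	}
	return wiphy;
}

static void example_wiphy_teardown(struct wiphy *wiphy)
{
	wiphy_unregister(wiphy);	/* required before wiphy_free() once registered */
	wiphy_free(wiphy);
}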
*/ + ieee->wdev.wiphy->privid = libipw_wiphy_privid; + + ieee->wdev.wiphy->max_scan_ssids = 1; + ieee->wdev.wiphy->max_scan_ie_len = 0; + ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_ADHOC); + } + err = libipw_networks_allocate(ieee); if (err) { LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err); - goto failed_free_netdev; + goto failed_free_wiphy; } libipw_networks_initialize(ieee); @@ -193,19 +217,31 @@ struct net_device *alloc_ieee80211(int sizeof_priv) return dev; +failed_free_wiphy: + if (!monitor) + wiphy_free(ieee->wdev.wiphy); failed_free_netdev: free_netdev(dev); failed: return NULL; } -void free_ieee80211(struct net_device *dev) +void free_ieee80211(struct net_device *dev, int monitor) { struct libipw_device *ieee = netdev_priv(dev); lib80211_crypt_info_free(&ieee->crypt_info); libipw_networks_free(ieee); + + /* free cfg80211 resources */ + if (!monitor) { + wiphy_unregister(ieee->wdev.wiphy); + kfree(ieee->a_band.channels); + kfree(ieee->bg_band.channels); + wiphy_free(ieee->wdev.wiphy); + } + free_netdev(dev); } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c index 8f82537045bf..1e387b9dce1e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -170,7 +170,6 @@ struct iwl_cfg iwl1000_bgn_cfg = { .shadow_ram_support = false, .ht_greenfield_support = true, .led_compensation = 51, - .use_rts_for_ht = true, /* use rts/cts protection */ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, .support_ct_kill_exit = true, }; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/trunk/drivers/net/wireless/iwlwifi/iwl-3945-rs.c index d4b49883b30e..dc81e19674f7 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -355,7 +355,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband, init_timer(&rs_sta->rate_scale_flush); rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; - rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush; + rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush; for (i = 0; i < IWL_RATE_COUNT_3945; i++) iwl3945_clear_window(&rs_sta->win[i]); diff --git a/trunk/drivers/net/wireless/p54/p54pci.c b/trunk/drivers/net/wireless/p54/p54pci.c index a15962a19b2a..d348c265e867 100644 --- a/trunk/drivers/net/wireless/p54/p54pci.c +++ b/trunk/drivers/net/wireless/p54/p54pci.c @@ -411,7 +411,7 @@ static int p54p_open(struct ieee80211_hw *dev) int err; init_completion(&priv->boot_comp); - err = request_irq(priv->pdev->irq, p54p_interrupt, + err = request_irq(priv->pdev->irq, &p54p_interrupt, IRQF_SHARED, "p54pci", dev); if (err) { dev_err(&priv->pdev->dev, "failed to register IRQ handler\n"); diff --git a/trunk/drivers/net/wireless/p54/p54usb.c b/trunk/drivers/net/wireless/p54/p54usb.c index 92af9b96bb7a..17e199546eeb 100644 --- a/trunk/drivers/net/wireless/p54/p54usb.c +++ b/trunk/drivers/net/wireless/p54/p54usb.c @@ -426,16 +426,12 @@ static const char p54u_romboot_3887[] = "~~~~"; static int p54u_firmware_reset_3887(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; - u8 *buf; + u8 buf[4]; int ret; - buf = kmalloc(4, GFP_KERNEL); - if (!buf) - return -ENOMEM; - memcpy(buf, p54u_romboot_3887, 4); + memcpy(&buf, p54u_romboot_3887, sizeof(buf)); ret = p54u_bulk_msg(priv, P54U_PIPE_DATA, - buf, 4); - kfree(buf); + buf, sizeof(buf)); if (ret) dev_err(&priv->udev->dev, "(p54usb) unable 
to jump to " "boot ROM (%d)!\n", ret); diff --git a/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c b/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c index a1a3dd15c664..16429c49139c 100644 --- a/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/trunk/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -548,7 +548,7 @@ static int rtl8180_start(struct ieee80211_hw *dev) rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma); rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma); - ret = request_irq(priv->pdev->irq, rtl8180_interrupt, + ret = request_irq(priv->pdev->irq, &rtl8180_interrupt, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) { printk(KERN_ERR "%s: failed to register IRQ handler\n", diff --git a/trunk/drivers/s390/net/Makefile b/trunk/drivers/s390/net/Makefile index 6cab5a62f99e..96eddb3b1d08 100644 --- a/trunk/drivers/s390/net/Makefile +++ b/trunk/drivers/s390/net/Makefile @@ -3,11 +3,11 @@ # ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o -obj-$(CONFIG_CTCM) += ctcm.o fsm.o +obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o -obj-$(CONFIG_LCS) += lcs.o -obj-$(CONFIG_CLAW) += claw.o +obj-$(CONFIG_LCS) += lcs.o cu3088.o +obj-$(CONFIG_CLAW) += claw.o cu3088.o qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o obj-$(CONFIG_QETH) += qeth.o qeth_l2-y += qeth_l2_main.o diff --git a/trunk/drivers/s390/net/claw.c b/trunk/drivers/s390/net/claw.c index 3c77bfe0764c..c63babefb698 100644 --- a/trunk/drivers/s390/net/claw.c +++ b/trunk/drivers/s390/net/claw.c @@ -90,6 +90,7 @@ #include #include +#include "cu3088.h" #include "claw.h" /* @@ -257,9 +258,6 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev) return -EPERM; } -/* the root device for claw group devices */ -static struct device *claw_root_dev; - /* ccwgroup table */ static struct ccwgroup_driver claw_group_driver = { @@ -274,47 +272,6 @@ static struct ccwgroup_driver claw_group_driver = { .prepare = claw_pm_prepare, }; -static struct ccw_device_id claw_ids[] = { - {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw}, - {}, -}; -MODULE_DEVICE_TABLE(ccw, claw_ids); - -static struct ccw_driver claw_ccw_driver = { - .owner = THIS_MODULE, - .name = "claw", - .ids = claw_ids, - .probe = ccwgroup_probe_ccwdev, - .remove = ccwgroup_remove_ccwdev, -}; - -static ssize_t -claw_driver_group_store(struct device_driver *ddrv, const char *buf, - size_t count) -{ - int err; - err = ccwgroup_create_from_string(claw_root_dev, - claw_group_driver.driver_id, - &claw_ccw_driver, 3, buf); - return err ? 
err : count; -} - -static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store); - -static struct attribute *claw_group_attrs[] = { - &driver_attr_group.attr, - NULL, -}; - -static struct attribute_group claw_group_attr_group = { - .attrs = claw_group_attrs, -}; - -static const struct attribute_group *claw_group_attr_groups[] = { - &claw_group_attr_group, - NULL, -}; - /* * Key functions */ @@ -3369,11 +3326,7 @@ claw_remove_files(struct device *dev) static void __exit claw_cleanup(void) { - driver_remove_file(&claw_group_driver.driver, - &driver_attr_group); - ccwgroup_driver_unregister(&claw_group_driver); - ccw_driver_unregister(&claw_ccw_driver); - root_device_unregister(claw_root_dev); + unregister_cu3088_discipline(&claw_group_driver); claw_unregister_debug_facility(); pr_info("Driver unloaded\n"); @@ -3395,31 +3348,16 @@ claw_init(void) if (ret) { pr_err("Registering with the S/390 debug feature" " failed with error code %d\n", ret); - goto out_err; + return ret; } CLAW_DBF_TEXT(2, setup, "init_mod"); - claw_root_dev = root_device_register("qeth"); - ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0; - if (ret) - goto register_err; - ret = ccw_driver_register(&claw_ccw_driver); - if (ret) - goto ccw_err; - claw_group_driver.driver.groups = claw_group_attr_groups; - ret = ccwgroup_driver_register(&claw_group_driver); - if (ret) - goto ccwgroup_err; - return 0; - -ccwgroup_err: - ccw_driver_unregister(&claw_ccw_driver); -ccw_err: - root_device_unregister(claw_root_dev); -register_err: - CLAW_DBF_TEXT(2, setup, "init_bad"); - claw_unregister_debug_facility(); -out_err: - pr_err("Initializing the claw device driver failed\n"); + ret = register_cu3088_discipline(&claw_group_driver); + if (ret) { + CLAW_DBF_TEXT(2, setup, "init_bad"); + claw_unregister_debug_facility(); + pr_err("Registering with the cu3088 device driver failed " + "with error code %d\n", ret); + } return ret; } diff --git a/trunk/drivers/s390/net/claw.h b/trunk/drivers/s390/net/claw.h index 46d59a13db12..005072c420d3 100644 --- a/trunk/drivers/s390/net/claw.h +++ b/trunk/drivers/s390/net/claw.h @@ -129,18 +129,6 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level) } \ } while (0) -/** - * Enum for classifying detected devices. 
- */ -enum claw_channel_types { - /* Device is not a channel */ - claw_channel_type_none, - - /* Device is a CLAW channel device */ - claw_channel_type_claw -}; - - /******************************************************* * Define Control Blocks * * * diff --git a/trunk/drivers/s390/net/ctcm_fsms.c b/trunk/drivers/s390/net/ctcm_fsms.c index 70eb7f138414..4ded9ac2c5ef 100644 --- a/trunk/drivers/s390/net/ctcm_fsms.c +++ b/trunk/drivers/s390/net/ctcm_fsms.c @@ -44,6 +44,7 @@ #include #include "fsm.h" +#include "cu3088.h" #include "ctcm_dbug.h" #include "ctcm_main.h" diff --git a/trunk/drivers/s390/net/ctcm_fsms.h b/trunk/drivers/s390/net/ctcm_fsms.h index 046d077fabbb..2326aba9807a 100644 --- a/trunk/drivers/s390/net/ctcm_fsms.h +++ b/trunk/drivers/s390/net/ctcm_fsms.h @@ -39,6 +39,7 @@ #include #include "fsm.h" +#include "cu3088.h" #include "ctcm_main.h" /* diff --git a/trunk/drivers/s390/net/ctcm_main.c b/trunk/drivers/s390/net/ctcm_main.c index e35713dd0504..db054ed1a8cc 100644 --- a/trunk/drivers/s390/net/ctcm_main.c +++ b/trunk/drivers/s390/net/ctcm_main.c @@ -51,16 +51,12 @@ #include +#include "cu3088.h" #include "ctcm_fsms.h" #include "ctcm_main.h" /* Some common global variables */ -/** - * The root device for ctcm group devices - */ -static struct device *ctcm_root_dev; - /* * Linked list of all detected channels. */ @@ -250,7 +246,7 @@ static void channel_remove(struct channel *ch) * * returns Pointer to a channel or NULL if no matching channel available. */ -static struct channel *channel_get(enum ctcm_channel_types type, +static struct channel *channel_get(enum channel_types type, char *id, int direction) { struct channel *ch = channels; @@ -1346,7 +1342,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev) * * returns 0 on success, !0 on error. */ -static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, +static int add_channel(struct ccw_device *cdev, enum channel_types type, struct ctcm_priv *priv) { struct channel **c = &channels; @@ -1505,13 +1501,13 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, /* * Return type of a detected device. 
*/ -static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id) +static enum channel_types get_channel_type(struct ccw_device_id *id) { - enum ctcm_channel_types type; - type = (enum ctcm_channel_types)id->driver_info; + enum channel_types type; + type = (enum channel_types)id->driver_info; - if (type == ctcm_channel_type_ficon) - type = ctcm_channel_type_escon; + if (type == channel_type_ficon) + type = channel_type_escon; return type; } @@ -1529,7 +1525,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) char read_id[CTCM_ID_SIZE]; char write_id[CTCM_ID_SIZE]; int direction; - enum ctcm_channel_types type; + enum channel_types type; struct ctcm_priv *priv; struct net_device *dev; struct ccw_device *cdev0; @@ -1724,11 +1720,6 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev) return 0; netif_device_detach(priv->channel[READ]->netdev); ctcm_close(priv->channel[READ]->netdev); - if (!wait_event_timeout(priv->fsm->wait_q, - fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { - netif_device_attach(priv->channel[READ]->netdev); - return -EBUSY; - } ccw_device_set_offline(gdev->cdev[1]); ccw_device_set_offline(gdev->cdev[0]); return 0; @@ -1753,22 +1744,6 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev) return rc; } -static struct ccw_device_id ctcm_ids[] = { - {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel}, - {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon}, - {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon}, - {}, -}; -MODULE_DEVICE_TABLE(ccw, ctcm_ids); - -static struct ccw_driver ctcm_ccw_driver = { - .owner = THIS_MODULE, - .name = "ctcm", - .ids = ctcm_ids, - .probe = ccwgroup_probe_ccwdev, - .remove = ccwgroup_remove_ccwdev, -}; - static struct ccwgroup_driver ctcm_group_driver = { .owner = THIS_MODULE, .name = CTC_DRIVER_NAME, @@ -1783,33 +1758,6 @@ static struct ccwgroup_driver ctcm_group_driver = { .restore = ctcm_pm_resume, }; -static ssize_t -ctcm_driver_group_store(struct device_driver *ddrv, const char *buf, - size_t count) -{ - int err; - - err = ccwgroup_create_from_string(ctcm_root_dev, - ctcm_group_driver.driver_id, - &ctcm_ccw_driver, 2, buf); - return err ? err : count; -} - -static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store); - -static struct attribute *ctcm_group_attrs[] = { - &driver_attr_group.attr, - NULL, -}; - -static struct attribute_group ctcm_group_attr_group = { - .attrs = ctcm_group_attrs, -}; - -static const struct attribute_group *ctcm_group_attr_groups[] = { - &ctcm_group_attr_group, - NULL, -}; /* * Module related routines @@ -1823,10 +1771,7 @@ static const struct attribute_group *ctcm_group_attr_groups[] = { */ static void __exit ctcm_exit(void) { - driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group); - ccwgroup_driver_unregister(&ctcm_group_driver); - ccw_driver_unregister(&ctcm_ccw_driver); - root_device_unregister(ctcm_root_dev); + unregister_cu3088_discipline(&ctcm_group_driver); ctcm_unregister_dbf_views(); pr_info("CTCM driver unloaded\n"); } @@ -1852,31 +1797,17 @@ static int __init ctcm_init(void) channels = NULL; ret = ctcm_register_dbf_views(); - if (ret) - goto out_err; - ctcm_root_dev = root_device_register("ctcm"); - ret = IS_ERR(ctcm_root_dev) ? 
PTR_ERR(ctcm_root_dev) : 0; - if (ret) - goto register_err; - ret = ccw_driver_register(&ctcm_ccw_driver); - if (ret) - goto ccw_err; - ctcm_group_driver.driver.groups = ctcm_group_attr_groups; - ret = ccwgroup_driver_register(&ctcm_group_driver); - if (ret) - goto ccwgroup_err; + if (ret) { + return ret; + } + ret = register_cu3088_discipline(&ctcm_group_driver); + if (ret) { + ctcm_unregister_dbf_views(); + pr_err("%s / register_cu3088_discipline failed, ret = %d\n", + __func__, ret); + return ret; + } print_banner(); - return 0; - -ccwgroup_err: - ccw_driver_unregister(&ctcm_ccw_driver); -ccw_err: - root_device_unregister(ctcm_root_dev); -register_err: - ctcm_unregister_dbf_views(); -out_err: - pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n", - __func__, ret); return ret; } diff --git a/trunk/drivers/s390/net/ctcm_main.h b/trunk/drivers/s390/net/ctcm_main.h index d34fa14f44e7..d925e732b7d8 100644 --- a/trunk/drivers/s390/net/ctcm_main.h +++ b/trunk/drivers/s390/net/ctcm_main.h @@ -16,6 +16,7 @@ #include #include "fsm.h" +#include "cu3088.h" #include "ctcm_dbug.h" #include "ctcm_mpc.h" @@ -65,23 +66,6 @@ ctcmpc_dumpit(buf, len); \ } while (0) -/** - * Enum for classifying detected devices - */ -enum ctcm_channel_types { - /* Device is not a channel */ - ctcm_channel_type_none, - - /* Device is a CTC/A */ - ctcm_channel_type_parallel, - - /* Device is a FICON channel */ - ctcm_channel_type_ficon, - - /* Device is a ESCON channel */ - ctcm_channel_type_escon -}; - /* * CCW commands, used in this driver. */ @@ -137,7 +121,7 @@ struct channel { * Type of this channel. * CTC/A or Escon for valid channels. */ - enum ctcm_channel_types type; + enum channel_types type; /* * Misc. flags. See CHANNEL_FLAGS_... below */ diff --git a/trunk/drivers/s390/net/ctcm_mpc.c b/trunk/drivers/s390/net/ctcm_mpc.c index 5978b390153f..781e18be7e8f 100644 --- a/trunk/drivers/s390/net/ctcm_mpc.c +++ b/trunk/drivers/s390/net/ctcm_mpc.c @@ -53,6 +53,7 @@ #include #include +#include "cu3088.h" #include "ctcm_mpc.h" #include "ctcm_main.h" #include "ctcm_fsms.h" diff --git a/trunk/drivers/s390/net/ctcm_sysfs.c b/trunk/drivers/s390/net/ctcm_sysfs.c index 738ad26c74a7..8452bb052d68 100644 --- a/trunk/drivers/s390/net/ctcm_sysfs.c +++ b/trunk/drivers/s390/net/ctcm_sysfs.c @@ -158,15 +158,6 @@ static ssize_t ctcm_proto_store(struct device *dev, return count; } -const char *ctcm_type[] = { - "not a channel", - "CTC/A", - "FICON channel", - "ESCON channel", - "unknown channel type", - "unsupported channel type", -}; - static ssize_t ctcm_type_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -177,7 +168,7 @@ static ssize_t ctcm_type_show(struct device *dev, return -ENODEV; return sprintf(buf, "%s\n", - ctcm_type[cgdev->cdev[0]->id.driver_info]); + cu3088_type[cgdev->cdev[0]->id.driver_info]); } static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); diff --git a/trunk/drivers/s390/net/cu3088.c b/trunk/drivers/s390/net/cu3088.c new file mode 100644 index 000000000000..48383459e99b --- /dev/null +++ b/trunk/drivers/s390/net/cu3088.c @@ -0,0 +1,148 @@ +/* + * CTC / LCS ccw_device driver + * + * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Arnd Bergmann + * Cornelia Huck + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include +#include +#include + +#include +#include + +#include "cu3088.h" + +const char *cu3088_type[] = { + "not a channel", + "CTC/A", + "ESCON channel", + "FICON channel", + "OSA LCS card", + "CLAW channel device", + "unknown channel type", + "unsupported channel type", +}; + +/* static definitions */ + +static struct ccw_device_id cu3088_ids[] = { + { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, + { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, + { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, + { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, + { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw }, + { /* end of list */ } +}; + +static struct ccw_driver cu3088_driver; + +static struct device *cu3088_root_dev; + +static ssize_t +group_write(struct device_driver *drv, const char *buf, size_t count) +{ + int ret; + struct ccwgroup_driver *cdrv; + + cdrv = to_ccwgroupdrv(drv); + if (!cdrv) + return -EINVAL; + ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id, + &cu3088_driver, 2, buf); + + return (ret == 0) ? count : ret; +} + +static DRIVER_ATTR(group, 0200, NULL, group_write); + +/* Register-unregister for ctc&lcs */ +int +register_cu3088_discipline(struct ccwgroup_driver *dcp) +{ + int rc; + + if (!dcp) + return -EINVAL; + + /* Register discipline.*/ + rc = ccwgroup_driver_register(dcp); + if (rc) + return rc; + + rc = driver_create_file(&dcp->driver, &driver_attr_group); + if (rc) + ccwgroup_driver_unregister(dcp); + + return rc; + +} + +void +unregister_cu3088_discipline(struct ccwgroup_driver *dcp) +{ + if (!dcp) + return; + + driver_remove_file(&dcp->driver, &driver_attr_group); + ccwgroup_driver_unregister(dcp); +} + +static struct ccw_driver cu3088_driver = { + .owner = THIS_MODULE, + .ids = cu3088_ids, + .name = "cu3088", + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, +}; + +/* module setup */ +static int __init +cu3088_init (void) +{ + int rc; + + cu3088_root_dev = root_device_register("cu3088"); + if (IS_ERR(cu3088_root_dev)) + return PTR_ERR(cu3088_root_dev); + rc = ccw_driver_register(&cu3088_driver); + if (rc) + root_device_unregister(cu3088_root_dev); + + return rc; +} + +static void __exit +cu3088_exit (void) +{ + ccw_driver_unregister(&cu3088_driver); + root_device_unregister(cu3088_root_dev); +} + +MODULE_DEVICE_TABLE(ccw,cu3088_ids); +MODULE_AUTHOR("Arnd Bergmann "); +MODULE_LICENSE("GPL"); + +module_init(cu3088_init); +module_exit(cu3088_exit); + +EXPORT_SYMBOL_GPL(cu3088_type); +EXPORT_SYMBOL_GPL(register_cu3088_discipline); +EXPORT_SYMBOL_GPL(unregister_cu3088_discipline); diff --git a/trunk/drivers/s390/net/cu3088.h b/trunk/drivers/s390/net/cu3088.h new file mode 100644 index 000000000000..d8558a7105a5 --- /dev/null +++ b/trunk/drivers/s390/net/cu3088.h @@ -0,0 +1,41 @@ +#ifndef _CU3088_H +#define _CU3088_H + +/** + * Enum for classifying detected devices. 
+ */ +enum channel_types { + /* Device is not a channel */ + channel_type_none, + + /* Device is a CTC/A */ + channel_type_parallel, + + /* Device is a ESCON channel */ + channel_type_escon, + + /* Device is a FICON channel */ + channel_type_ficon, + + /* Device is a OSA2 card */ + channel_type_osa2, + + /* Device is a CLAW channel device */ + channel_type_claw, + + /* Device is a channel, but we don't know + * anything about it */ + channel_type_unknown, + + /* Device is an unsupported model */ + channel_type_unsupported, + + /* number of type entries */ + num_channel_types +}; + +extern const char *cu3088_type[num_channel_types]; +extern int register_cu3088_discipline(struct ccwgroup_driver *); +extern void unregister_cu3088_discipline(struct ccwgroup_driver *); + +#endif diff --git a/trunk/drivers/s390/net/fsm.c b/trunk/drivers/s390/net/fsm.c index cae48cbc5e96..2c1db8036b7c 100644 --- a/trunk/drivers/s390/net/fsm.c +++ b/trunk/drivers/s390/net/fsm.c @@ -27,7 +27,6 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_ return NULL; } strlcpy(this->name, name, sizeof(this->name)); - init_waitqueue_head(&this->wait_q); f = kzalloc(sizeof(fsm), order); if (f == NULL) { diff --git a/trunk/drivers/s390/net/fsm.h b/trunk/drivers/s390/net/fsm.h index 1e8b235d95b5..af679c10f1bd 100644 --- a/trunk/drivers/s390/net/fsm.h +++ b/trunk/drivers/s390/net/fsm.h @@ -66,7 +66,6 @@ typedef struct fsm_instance_t { char name[16]; void *userdata; int userint; - wait_queue_head_t wait_q; #if FSM_DEBUG_HISTORY int history_index; int history_size; @@ -198,7 +197,6 @@ fsm_newstate(fsm_instance *fi, int newstate) printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name, fi->f->state_names[newstate]); #endif - wake_up(&fi->wait_q); } /** diff --git a/trunk/drivers/s390/net/lcs.c b/trunk/drivers/s390/net/lcs.c index f6cc46dc0501..5e46415d3e13 100644 --- a/trunk/drivers/s390/net/lcs.c +++ b/trunk/drivers/s390/net/lcs.c @@ -47,6 +47,7 @@ #include #include "lcs.h" +#include "cu3088.h" #if !defined(CONFIG_NET_ETHERNET) && \ @@ -59,11 +60,7 @@ */ static char version[] __initdata = "LCS driver"; - -/** - * the root device for lcs group devices - */ -static struct device *lcs_root_dev; +static char debug_buffer[255]; /** * Some prototypes. 
@@ -79,7 +76,6 @@ static int lcs_recovery(void *ptr); /** * Debug Facility Stuff */ -static char debug_buffer[255]; static debug_info_t *lcs_dbf_setup; static debug_info_t *lcs_dbf_trace; @@ -1972,15 +1968,6 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); -const char *lcs_type[] = { - "not a channel", - "2216 parallel", - "2216 channel", - "OSA LCS card", - "unknown channel type", - "unsupported channel type", -}; - static ssize_t lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1990,7 +1977,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) if (!cgdev) return -ENODEV; - return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]); + return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]); } static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); @@ -2383,22 +2370,6 @@ static int lcs_restore(struct ccwgroup_device *gdev) return lcs_pm_resume(card); } -static struct ccw_device_id lcs_ids[] = { - {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel}, - {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216}, - {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2}, - {}, -}; -MODULE_DEVICE_TABLE(ccw, lcs_ids); - -static struct ccw_driver lcs_ccw_driver = { - .owner = THIS_MODULE, - .name = "lcs", - .ids = lcs_ids, - .probe = ccwgroup_probe_ccwdev, - .remove = ccwgroup_remove_ccwdev, -}; - /** * LCS ccwgroup driver registration */ @@ -2418,33 +2389,6 @@ static struct ccwgroup_driver lcs_group_driver = { .restore = lcs_restore, }; -static ssize_t -lcs_driver_group_store(struct device_driver *ddrv, const char *buf, - size_t count) -{ - int err; - err = ccwgroup_create_from_string(lcs_root_dev, - lcs_group_driver.driver_id, - &lcs_ccw_driver, 2, buf); - return err ? err : count; -} - -static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store); - -static struct attribute *lcs_group_attrs[] = { - &driver_attr_group.attr, - NULL, -}; - -static struct attribute_group lcs_group_attr_group = { - .attrs = lcs_group_attrs, -}; - -static const struct attribute_group *lcs_group_attr_groups[] = { - &lcs_group_attr_group, - NULL, -}; - /** * LCS Module/Kernel initialization function */ @@ -2456,30 +2400,17 @@ __init lcs_init_module(void) pr_info("Loading %s\n", version); rc = lcs_register_debug_facility(); LCS_DBF_TEXT(0, setup, "lcsinit"); - if (rc) - goto out_err; - lcs_root_dev = root_device_register("lcs"); - rc = IS_ERR(lcs_root_dev) ? 
PTR_ERR(lcs_root_dev) : 0; - if (rc) - goto register_err; - rc = ccw_driver_register(&lcs_ccw_driver); - if (rc) - goto ccw_err; - lcs_group_driver.driver.groups = lcs_group_attr_groups; - rc = ccwgroup_driver_register(&lcs_group_driver); - if (rc) - goto ccwgroup_err; - return 0; + if (rc) { + pr_err("Initialization failed\n"); + return rc; + } -ccwgroup_err: - ccw_driver_unregister(&lcs_ccw_driver); -ccw_err: - root_device_unregister(lcs_root_dev); -register_err: - lcs_unregister_debug_facility(); -out_err: - pr_err("Initializing the lcs device driver failed\n"); - return rc; + rc = register_cu3088_discipline(&lcs_group_driver); + if (rc) { + pr_err("Initialization failed\n"); + return rc; + } + return 0; } @@ -2491,11 +2422,7 @@ __exit lcs_cleanup_module(void) { pr_info("Terminating lcs module.\n"); LCS_DBF_TEXT(0, trace, "cleanup"); - driver_remove_file(&lcs_group_driver.driver, - &driver_attr_group); - ccwgroup_driver_unregister(&lcs_group_driver); - ccw_driver_unregister(&lcs_ccw_driver); - root_device_unregister(lcs_root_dev); + unregister_cu3088_discipline(&lcs_group_driver); lcs_unregister_debug_facility(); } diff --git a/trunk/drivers/s390/net/lcs.h b/trunk/drivers/s390/net/lcs.h index 8c03392ac833..6d668642af27 100644 --- a/trunk/drivers/s390/net/lcs.h +++ b/trunk/drivers/s390/net/lcs.h @@ -36,24 +36,6 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level) #define CARD_FROM_DEV(cdev) \ (struct lcs_card *) dev_get_drvdata( \ &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); - -/** - * Enum for classifying detected devices. - */ -enum lcs_channel_types { - /* Device is not a channel */ - lcs_channel_type_none, - - /* Device is a 2216 channel */ - lcs_channel_type_parallel, - - /* Device is a 2216 channel */ - lcs_channel_type_2216, - - /* Device is a OSA2 card */ - lcs_channel_type_osa2 -}; - /** * CCW commands used in this driver */ diff --git a/trunk/drivers/s390/net/netiucv.c b/trunk/drivers/s390/net/netiucv.c index 395c04c2b00f..c84eadd3602a 100644 --- a/trunk/drivers/s390/net/netiucv.c +++ b/trunk/drivers/s390/net/netiucv.c @@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) if (single_flag) { if ((skb = skb_dequeue(&conn->commit_queue))) { atomic_dec(&skb->users); + dev_kfree_skb_any(skb); if (privptr) { privptr->stats.tx_packets++; privptr->stats.tx_bytes += (skb->len - NETIUCV_HDRLEN - - NETIUCV_HDRLEN); + - NETIUCV_HDRLEN); } - dev_kfree_skb_any(skb); } } conn->tx_buff->data = conn->tx_buff->head; diff --git a/trunk/drivers/s390/net/qeth_core.h b/trunk/drivers/s390/net/qeth_core.h index b232693378cd..e8f72d715eba 100644 --- a/trunk/drivers/s390/net/qeth_core.h +++ b/trunk/drivers/s390/net/qeth_core.h @@ -122,6 +122,7 @@ struct qeth_perf_stats { __u64 outbound_do_qdio_start_time; unsigned int outbound_do_qdio_cnt; unsigned int outbound_do_qdio_time; + /* eddp data */ unsigned int large_send_bytes; unsigned int large_send_cnt; unsigned int sg_skbs_sent; @@ -134,7 +135,6 @@ struct qeth_perf_stats { unsigned int sg_frags_rx; unsigned int sg_alloc_page_rx; unsigned int tx_csum; - unsigned int tx_lin; }; /* Routing stuff */ @@ -648,7 +648,6 @@ struct qeth_card_options { enum qeth_large_send_types large_send; int performance_stats; int rx_sg_cb; - enum qeth_ipa_isolation_modes isolation; }; /* @@ -777,6 +776,7 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); } +struct qeth_eddp_context; extern struct ccwgroup_driver 
qeth_l2_ccwgroup_driver; extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; const char *qeth_get_cardname_short(struct qeth_card *); @@ -836,6 +836,7 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char); struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); int qeth_mdio_read(struct net_device *, int, int); int qeth_snmp_command(struct qeth_card *, char __user *); +int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types); struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long); @@ -855,7 +856,6 @@ void qeth_core_get_strings(struct net_device *, u32, u8 *); void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); -int qeth_set_access_ctrl_online(struct qeth_card *card); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/trunk/drivers/s390/net/qeth_core_main.c b/trunk/drivers/s390/net/qeth_core_main.c index d34804d5ece1..edee4dc6430c 100644 --- a/trunk/drivers/s390/net/qeth_core_main.c +++ b/trunk/drivers/s390/net/qeth_core_main.c @@ -270,6 +270,41 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) return qeth_alloc_buffer_pool(card); } +int qeth_set_large_send(struct qeth_card *card, + enum qeth_large_send_types type) +{ + int rc = 0; + + if (card->dev == NULL) { + card->options.large_send = type; + return 0; + } + if (card->state == CARD_STATE_UP) + netif_tx_disable(card->dev); + card->options.large_send = type; + switch (card->options.large_send) { + case QETH_LARGE_SEND_TSO: + if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { + card->dev->features |= NETIF_F_TSO | NETIF_F_SG | + NETIF_F_HW_CSUM; + } else { + card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | + NETIF_F_HW_CSUM); + card->options.large_send = QETH_LARGE_SEND_NO; + rc = -EOPNOTSUPP; + } + break; + default: /* includes QETH_LARGE_SEND_NO */ + card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | + NETIF_F_HW_CSUM); + break; + } + if (card->state == CARD_STATE_UP) + netif_wake_queue(card->dev); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_set_large_send); + static int qeth_issue_next_read(struct qeth_card *card) { int rc; @@ -1044,7 +1079,6 @@ static void qeth_set_intial_options(struct qeth_card *card) card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.performance_stats = 0; card->options.rx_sg_cb = QETH_RX_SG_CB; - card->options.isolation = ISOLATION_MODE_NONE; } static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) @@ -3355,156 +3389,6 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); -static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) -{ - struct qeth_ipa_cmd *cmd; - struct qeth_set_access_ctrl *access_ctrl_req; - int rc; - - QETH_DBF_TEXT(TRACE, 4, "setaccb"); - - cmd = (struct qeth_ipa_cmd *) data; - access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; - QETH_DBF_TEXT_(SETUP, 2, "setaccb"); - QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); - QETH_DBF_TEXT_(SETUP, 2, "rc=%d", - cmd->data.setadapterparms.hdr.return_code); - switch (cmd->data.setadapterparms.hdr.return_code) { - case SET_ACCESS_CTRL_RC_SUCCESS: - case 
SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: - case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: - { - card->options.isolation = access_ctrl_req->subcmd_code; - if (card->options.isolation == ISOLATION_MODE_NONE) { - dev_info(&card->gdev->dev, - "QDIO data connection isolation is deactivated\n"); - } else { - dev_info(&card->gdev->dev, - "QDIO data connection isolation is activated\n"); - } - QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); - rc = 0; - break; - } - case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: - { - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); - dev_err(&card->gdev->dev, "Adapter does not " - "support QDIO data connection isolation\n"); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = -EOPNOTSUPP; - break; - } - case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: - { - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); - dev_err(&card->gdev->dev, - "Adapter is dedicated. " - "QDIO data connection isolation not supported\n"); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = -EOPNOTSUPP; - break; - } - case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: - { - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); - dev_err(&card->gdev->dev, - "TSO does not permit QDIO data connection isolation\n"); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = -EPERM; - break; - } - default: - { - /* this should never happen */ - QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d" - "==UNKNOWN\n", - card->gdev->dev.kobj.name, - access_ctrl_req->subcmd_code, - cmd->data.setadapterparms.hdr.return_code); - - /* ensure isolation mode is "none" */ - card->options.isolation = ISOLATION_MODE_NONE; - rc = 0; - break; - } - } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); - return rc; -} - -static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, - enum qeth_ipa_isolation_modes isolation) -{ - int rc; - struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; - struct qeth_set_access_ctrl *access_ctrl_req; - - QETH_DBF_TEXT(TRACE, 4, "setacctl"); - - QETH_DBF_TEXT_(SETUP, 2, "setacctl"); - QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); - - iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, - sizeof(struct qeth_ipacmd_setadpparms_hdr) + - sizeof(struct qeth_set_access_ctrl)); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; - access_ctrl_req->subcmd_code = isolation; - - rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, - NULL); - QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc); - return rc; -} - -int qeth_set_access_ctrl_online(struct qeth_card *card) -{ - int rc = 0; - - QETH_DBF_TEXT(TRACE, 4, "setactlo"); - - if (card->info.type == QETH_CARD_TYPE_OSAE && - qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { - rc = qeth_setadpparms_set_access_ctrl(card, - card->options.isolation); - if (rc) { - QETH_DBF_MESSAGE(3, - "IPA(SET_ACCESS_CTRL,%s,%d) sent failed", - 
card->gdev->dev.kobj.name, - rc); - } - } else if (card->options.isolation != ISOLATION_MODE_NONE) { - card->options.isolation = ISOLATION_MODE_NONE; - - dev_err(&card->gdev->dev, "Adapter does not " - "support QDIO data connection isolation\n"); - rc = -EOPNOTSUPP; - } - return rc; -} -EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online); - void qeth_tx_timeout(struct net_device *dev) { struct qeth_card *card; @@ -3848,36 +3732,30 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, int qeth_core_hardsetup_card(struct qeth_card *card) { struct qdio_ssqd_desc *ssqd; - int retries = 0; + int retries = 3; int mpno = 0; int rc; QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); retry: - if (retries) + if (retries < 3) { QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", dev_name(&card->gdev->dev)); - ccw_device_set_offline(CARD_DDEV(card)); - ccw_device_set_offline(CARD_WDEV(card)); - ccw_device_set_offline(CARD_RDEV(card)); - rc = ccw_device_set_online(CARD_RDEV(card)); - if (rc) - goto retriable; - rc = ccw_device_set_online(CARD_WDEV(card)); - if (rc) - goto retriable; - rc = ccw_device_set_online(CARD_DDEV(card)); - if (rc) - goto retriable; + ccw_device_set_offline(CARD_DDEV(card)); + ccw_device_set_offline(CARD_WDEV(card)); + ccw_device_set_offline(CARD_RDEV(card)); + ccw_device_set_online(CARD_RDEV(card)); + ccw_device_set_online(CARD_WDEV(card)); + ccw_device_set_online(CARD_DDEV(card)); + } rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); -retriable: if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break1"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - if (++retries > 3) + if (--retries < 0) goto out; else goto retry; @@ -4425,7 +4303,6 @@ static struct { {"tx do_QDIO time"}, {"tx do_QDIO count"}, {"tx csum"}, - {"tx lin"}, }; int qeth_core_get_sset_count(struct net_device *dev, int stringset) @@ -4483,7 +4360,6 @@ void qeth_core_get_ethtool_stats(struct net_device *dev, data[31] = card->perf_stats.outbound_do_qdio_time; data[32] = card->perf_stats.outbound_do_qdio_cnt; data[33] = card->perf_stats.tx_csum; - data[34] = card->perf_stats.tx_lin; } EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); diff --git a/trunk/drivers/s390/net/qeth_core_mpc.h b/trunk/drivers/s390/net/qeth_core_mpc.h index 52c03438dbec..eecb2ee62e85 100644 --- a/trunk/drivers/s390/net/qeth_core_mpc.h +++ b/trunk/drivers/s390/net/qeth_core_mpc.h @@ -234,19 +234,18 @@ enum qeth_ipa_setdelip_flags { /* SETADAPTER IPA Command: ****************************************************/ enum qeth_ipa_setadp_cmd { - IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L, - IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L, - IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L, - IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L, - IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L, - IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L, - IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L, - IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L, - IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L, - IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L, - IPA_SETADP_QUERY_CARD_INFO = 0x00000400L, - IPA_SETADP_SET_PROMISC_MODE = 0x00000800L, - IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L, + IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001, + IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002, + IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004, + IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008, + IPA_SETADP_SET_ADDRESSING_MODE = 0x0010, + IPA_SETADP_SET_CONFIG_PARMS = 0x0020, + 
IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040, + IPA_SETADP_SET_BROADCAST_MODE = 0x0080, + IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, + IPA_SETADP_SET_SNMP_CONTROL = 0x0200, + IPA_SETADP_QUERY_CARD_INFO = 0x0400, + IPA_SETADP_SET_PROMISC_MODE = 0x0800, }; enum qeth_ipa_mac_ops { CHANGE_ADDR_READ_MAC = 0, @@ -265,20 +264,6 @@ enum qeth_ipa_promisc_modes { SET_PROMISC_MODE_OFF = 0, SET_PROMISC_MODE_ON = 1, }; -enum qeth_ipa_isolation_modes { - ISOLATION_MODE_NONE = 0x00000000L, - ISOLATION_MODE_FWD = 0x00000001L, - ISOLATION_MODE_DROP = 0x00000002L, -}; -enum qeth_ipa_set_access_mode_rc { - SET_ACCESS_CTRL_RC_SUCCESS = 0x0000, - SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004, - SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008, - SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010, - SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014, - SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018, -}; - /* (SET)DELIP(M) IPA stuff ***************************************************/ struct qeth_ipacmd_setdelip4 { @@ -391,11 +376,6 @@ struct qeth_snmp_ureq { struct qeth_snmp_cmd cmd; } __attribute__((packed)); -/* SET_ACCESS_CONTROL: same format for request and reply */ -struct qeth_set_access_ctrl { - __u32 subcmd_code; -} __attribute__((packed)); - struct qeth_ipacmd_setadpparms_hdr { __u32 supp_hw_cmds; __u32 reserved1; @@ -414,7 +394,6 @@ struct qeth_ipacmd_setadpparms { struct qeth_query_cmds_supp query_cmds_supp; struct qeth_change_addr change_addr; struct qeth_snmp_cmd snmp; - struct qeth_set_access_ctrl set_access_ctrl; __u32 mode; } data; } __attribute__ ((packed)); diff --git a/trunk/drivers/s390/net/qeth_core_sys.c b/trunk/drivers/s390/net/qeth_core_sys.c index 9ff2b36fdc43..33505c2a0e3a 100644 --- a/trunk/drivers/s390/net/qeth_core_sys.c +++ b/trunk/drivers/s390/net/qeth_core_sys.c @@ -416,11 +416,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, qeth_dev_layer2_store); -#define ATTR_QETH_ISOLATION_NONE ("none") -#define ATTR_QETH_ISOLATION_FWD ("forward") -#define ATTR_QETH_ISOLATION_DROP ("drop") - -static ssize_t qeth_dev_isolation_show(struct device *dev, +static ssize_t qeth_dev_large_send_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qeth_card *card = dev_get_drvdata(dev); @@ -428,69 +424,44 @@ static ssize_t qeth_dev_isolation_show(struct device *dev, if (!card) return -EINVAL; - switch (card->options.isolation) { - case ISOLATION_MODE_NONE: - return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE); - case ISOLATION_MODE_FWD: - return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD); - case ISOLATION_MODE_DROP: - return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP); + switch (card->options.large_send) { + case QETH_LARGE_SEND_NO: + return sprintf(buf, "%s\n", "no"); + case QETH_LARGE_SEND_TSO: + return sprintf(buf, "%s\n", "TSO"); default: - return snprintf(buf, 5, "%s\n", "N/A"); + return sprintf(buf, "%s\n", "N/A"); } } -static ssize_t qeth_dev_isolation_store(struct device *dev, +static ssize_t qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - enum qeth_ipa_isolation_modes isolation; + enum qeth_large_send_types type; int rc = 0; - char *tmp, *curtoken; - curtoken = (char *) buf; - - if (!card) { - rc = -EINVAL; - goto out; - } - - /* check for unknown, too, in case we do not yet know who we are */ - if (card->info.type != QETH_CARD_TYPE_OSAE && - card->info.type != QETH_CARD_TYPE_UNKNOWN) { - 
rc = -EOPNOTSUPP; - dev_err(&card->gdev->dev, "Adapter does not " - "support QDIO data connection isolation\n"); - goto out; - } + char *tmp; - /* parse input into isolation mode */ - tmp = strsep(&curtoken, "\n"); - if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) { - isolation = ISOLATION_MODE_NONE; - } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) { - isolation = ISOLATION_MODE_FWD; - } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) { - isolation = ISOLATION_MODE_DROP; + if (!card) + return -EINVAL; + tmp = strsep((char **) &buf, "\n"); + if (!strcmp(tmp, "no")) { + type = QETH_LARGE_SEND_NO; + } else if (!strcmp(tmp, "TSO")) { + type = QETH_LARGE_SEND_TSO; } else { - rc = -EINVAL; - goto out; - } - rc = count; - - /* defer IP assist if device is offline (until discipline->set_online)*/ - card->options.isolation = isolation; - if (card->state == CARD_STATE_SOFTSETUP || - card->state == CARD_STATE_UP) { - int ipa_rc = qeth_set_access_ctrl_online(card); - if (ipa_rc != 0) - rc = ipa_rc; + return -EINVAL; } -out: - return rc; + if (card->options.large_send == type) + return count; + rc = qeth_set_large_send(card, type); + if (rc) + return rc; + return count; } -static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, - qeth_dev_isolation_store); +static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show, + qeth_dev_large_send_store); static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) { @@ -611,7 +582,7 @@ static struct attribute *qeth_device_attrs[] = { &dev_attr_recover.attr, &dev_attr_performance_stats.attr, &dev_attr_layer2.attr, - &dev_attr_isolation.attr, + &dev_attr_large_send.attr, NULL, }; diff --git a/trunk/drivers/s390/net/qeth_l2_main.c b/trunk/drivers/s390/net/qeth_l2_main.c index 0b763396d5d1..b61d5c723c50 100644 --- a/trunk/drivers/s390/net/qeth_l2_main.c +++ b/trunk/drivers/s390/net/qeth_l2_main.c @@ -940,17 +940,30 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); recover_flag = card->state; + rc = ccw_device_set_online(CARD_RDEV(card)); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return -EIO; + } + rc = ccw_device_set_online(CARD_WDEV(card)); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return -EIO; + } + rc = ccw_device_set_online(CARD_DDEV(card)); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return -EIO; + } + rc = qeth_core_hardsetup_card(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - rc = -ENODEV; goto out_remove; } - if (!card->dev && qeth_l2_setup_netdev(card)) { - rc = -ENODEV; + if (!card->dev && qeth_l2_setup_netdev(card)) goto out_remove; - } if (card->info.type != QETH_CARD_TYPE_OSN) qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); @@ -970,14 +983,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) card->lan_online = 0; return 0; } - rc = -ENODEV; goto out_remove; } else card->lan_online = 1; if (card->info.type != QETH_CARD_TYPE_OSN) { - /* configure isolation level */ - qeth_set_access_ctrl_online(card); + qeth_set_large_send(card, card->options.large_send); qeth_l2_process_vlans(card, 0); } @@ -986,7 +997,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) rc = qeth_init_qdio_queues(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); - rc = -ENODEV; goto out_remove; } card->state = CARD_STATE_SOFTSETUP; @@ -1008,7 +1018,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) /* let 
user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); return 0; - out_remove: card->use_hard_stop = 1; qeth_l2_stop_card(card, 0); @@ -1019,7 +1028,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) card->state = CARD_STATE_RECOVER; else card->state = CARD_STATE_DOWN; - return rc; + return -ENODEV; } static int qeth_l2_set_online(struct ccwgroup_device *gdev) diff --git a/trunk/drivers/s390/net/qeth_l3.h b/trunk/drivers/s390/net/qeth_l3.h index 321988fa9f7d..9f143c83bba3 100644 --- a/trunk/drivers/s390/net/qeth_l3.h +++ b/trunk/drivers/s390/net/qeth_l3.h @@ -60,7 +60,5 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, const u8 *); -int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types); -int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types); #endif /* __QETH_L3_H__ */ diff --git a/trunk/drivers/s390/net/qeth_l3_main.c b/trunk/drivers/s390/net/qeth_l3_main.c index fd1b6ed3721f..4ca28c16ca83 100644 --- a/trunk/drivers/s390/net/qeth_l3_main.c +++ b/trunk/drivers/s390/net/qeth_l3_main.c @@ -41,32 +41,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *, static int __qeth_l3_set_online(struct ccwgroup_device *, int); static int __qeth_l3_set_offline(struct ccwgroup_device *, int); -int qeth_l3_set_large_send(struct qeth_card *card, - enum qeth_large_send_types type) -{ - int rc = 0; - - card->options.large_send = type; - if (card->dev == NULL) - return 0; - - if (card->options.large_send == QETH_LARGE_SEND_TSO) { - if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { - card->dev->features |= NETIF_F_TSO | NETIF_F_SG | - NETIF_F_HW_CSUM; - } else { - card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | - NETIF_F_HW_CSUM); - card->options.large_send = QETH_LARGE_SEND_NO; - rc = -EOPNOTSUPP; - } - } else { - card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | - NETIF_F_HW_CSUM); - card->options.large_send = QETH_LARGE_SEND_NO; - } - return rc; -} static int qeth_l3_isxdigit(char *buf) { @@ -1465,35 +1439,6 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card) return 0; } -int qeth_l3_set_rx_csum(struct qeth_card *card, - enum qeth_checksum_types csum_type) -{ - int rc = 0; - - if (card->options.checksum_type == HW_CHECKSUMMING) { - if ((csum_type != HW_CHECKSUMMING) && - (card->state != CARD_STATE_DOWN)) { - rc = qeth_l3_send_simple_setassparms(card, - IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0); - if (rc) - return -EIO; - } - } else { - if (csum_type == HW_CHECKSUMMING) { - if (card->state != CARD_STATE_DOWN) { - if (!qeth_is_supported(card, - IPA_INBOUND_CHECKSUM)) - return -EPERM; - rc = qeth_l3_send_checksum_command(card); - if (rc) - return -EIO; - } - } - } - card->options.checksum_type = csum_type; - return rc; -} - static int qeth_l3_start_ipa_checksum(struct qeth_card *card) { int rc = 0; @@ -1561,8 +1506,6 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) static int qeth_l3_start_ipassists(struct qeth_card *card) { QETH_DBF_TEXT(TRACE, 3, "strtipas"); - - qeth_set_access_ctrl_online(card); /* go on*/ qeth_l3_start_ipa_arp_processing(card); /* go on*/ qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ qeth_l3_start_ipa_source_mac(card); /* go on*/ @@ -2741,24 +2684,6 @@ static void qeth_tx_csum(struct sk_buff *skb) *(__sum16 *)(skb->data + offset) = csum_fold(csum); } -static 
inline int qeth_l3_tso_elements(struct sk_buff *skb) -{ - unsigned long tcpd = (unsigned long)tcp_hdr(skb) + - tcp_hdr(skb)->doff * 4; - int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); - int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd); - elements += skb_shinfo(skb)->nr_frags; - return elements; -} - -static inline int qeth_l3_tso_check(struct sk_buff *skb) -{ - int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) - - (unsigned long)skb->data; - return (((unsigned long)skb->data & PAGE_MASK) != - (((unsigned long)skb->data + len) & PAGE_MASK)); -} - static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { int rc; @@ -2852,21 +2777,16 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ - if (large_send == QETH_LARGE_SEND_TSO) { - if (qeth_l3_tso_elements(new_skb) + 1 > 16) { - if (skb_linearize(new_skb)) - goto tx_drop; - if (card->options.performance_stats) - card->perf_stats.tx_lin++; - } + if ((large_send == QETH_LARGE_SEND_TSO) && + ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) { + if (skb_linearize(new_skb)) + goto tx_drop; } if ((large_send == QETH_LARGE_SEND_TSO) && (cast_type == RTN_UNSPEC)) { hdr = (struct qeth_hdr *)skb_push(new_skb, sizeof(struct qeth_hdr_tso)); - if (qeth_l3_tso_check(new_skb)) - QETH_DBF_MESSAGE(2, "tso skb misaligned\n"); memset(hdr, 0, sizeof(struct qeth_hdr_tso)); qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); qeth_tso_fill_header(card, hdr, new_skb); @@ -2983,28 +2903,46 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev) static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) { struct qeth_card *card = dev->ml_priv; + enum qeth_card_states old_state; enum qeth_checksum_types csum_type; + if ((card->state != CARD_STATE_UP) && + (card->state != CARD_STATE_DOWN)) + return -EPERM; + if (data) csum_type = HW_CHECKSUMMING; else csum_type = SW_CHECKSUMMING; - return qeth_l3_set_rx_csum(card, csum_type); + if (card->options.checksum_type != csum_type) { + old_state = card->state; + if (card->state == CARD_STATE_UP) + __qeth_l3_set_offline(card->gdev, 1); + card->options.checksum_type = csum_type; + if (old_state == CARD_STATE_UP) + __qeth_l3_set_online(card->gdev, 1); + } + return 0; } static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) { struct qeth_card *card = dev->ml_priv; - int rc = 0; if (data) { - rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO); + if (card->options.large_send == QETH_LARGE_SEND_NO) { + if (card->info.type == QETH_CARD_TYPE_IQD) + return -EPERM; + else + card->options.large_send = QETH_LARGE_SEND_TSO; + dev->features |= NETIF_F_TSO; + } } else { dev->features &= ~NETIF_F_TSO; card->options.large_send = QETH_LARGE_SEND_NO; } - return rc; + return 0; } static const struct ethtool_ops qeth_l3_ethtool_ops = { @@ -3120,7 +3058,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; - card->dev->gso_max_size = 15 * PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); return register_netdev(card->dev); @@ -3217,19 +3154,32 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); recover_flag = card->state; + rc = ccw_device_set_online(CARD_RDEV(card)); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return 
-EIO; + } + rc = ccw_device_set_online(CARD_WDEV(card)); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return -EIO; + } + rc = ccw_device_set_online(CARD_DDEV(card)); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return -EIO; + } + rc = qeth_core_hardsetup_card(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - rc = -ENODEV; goto out_remove; } qeth_l3_query_ipassists(card, QETH_PROT_IPV4); - if (!card->dev && qeth_l3_setup_netdev(card)) { - rc = -ENODEV; + if (!card->dev && qeth_l3_setup_netdev(card)) goto out_remove; - } card->state = CARD_STATE_HARDSETUP; qeth_print_status_message(card); @@ -3246,11 +3196,10 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) card->lan_online = 0; return 0; } - rc = -ENODEV; goto out_remove; } else card->lan_online = 1; - qeth_l3_set_large_send(card, card->options.large_send); + qeth_set_large_send(card, card->options.large_send); rc = qeth_l3_setadapter_parms(card); if (rc) @@ -3269,7 +3218,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) rc = qeth_init_qdio_queues(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); - rc = -ENODEV; goto out_remove; } card->state = CARD_STATE_SOFTSETUP; @@ -3300,7 +3248,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) card->state = CARD_STATE_RECOVER; else card->state = CARD_STATE_DOWN; - return rc; + return -ENODEV; } static int qeth_l3_set_online(struct ccwgroup_device *gdev) diff --git a/trunk/drivers/s390/net/qeth_l3_sys.c b/trunk/drivers/s390/net/qeth_l3_sys.c index 3360b0941aa1..c144b9924d52 100644 --- a/trunk/drivers/s390/net/qeth_l3_sys.c +++ b/trunk/drivers/s390/net/qeth_l3_sys.c @@ -293,79 +293,31 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - enum qeth_checksum_types csum_type; char *tmp; - int rc; if (!card) return -EINVAL; + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + tmp = strsep((char **) &buf, "\n"); if (!strcmp(tmp, "sw_checksumming")) - csum_type = SW_CHECKSUMMING; + card->options.checksum_type = SW_CHECKSUMMING; else if (!strcmp(tmp, "hw_checksumming")) - csum_type = HW_CHECKSUMMING; + card->options.checksum_type = HW_CHECKSUMMING; else if (!strcmp(tmp, "no_checksumming")) - csum_type = NO_CHECKSUMMING; - else + card->options.checksum_type = NO_CHECKSUMMING; + else { return -EINVAL; - - rc = qeth_l3_set_rx_csum(card, csum_type); - if (rc) - return rc; + } return count; } static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, qeth_l3_dev_checksum_store); -static ssize_t qeth_l3_dev_large_send_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - switch (card->options.large_send) { - case QETH_LARGE_SEND_NO: - return sprintf(buf, "%s\n", "no"); - case QETH_LARGE_SEND_TSO: - return sprintf(buf, "%s\n", "TSO"); - default: - return sprintf(buf, "%s\n", "N/A"); - } -} - -static ssize_t qeth_l3_dev_large_send_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct qeth_card *card = dev_get_drvdata(dev); - enum qeth_large_send_types type; - int rc = 0; - char *tmp; - - if (!card) - return -EINVAL; - tmp = strsep((char **) &buf, "\n"); - if (!strcmp(tmp, "no")) - type = QETH_LARGE_SEND_NO; - else if (!strcmp(tmp, "TSO")) - type = 
QETH_LARGE_SEND_TSO; - else - return -EINVAL; - - if (card->options.large_send == type) - return count; - rc = qeth_l3_set_large_send(card, type); - if (rc) - return rc; - return count; -} - -static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show, - qeth_l3_dev_large_send_store); - static struct attribute *qeth_l3_device_attrs[] = { &dev_attr_route4.attr, &dev_attr_route6.attr, @@ -373,7 +325,6 @@ static struct attribute *qeth_l3_device_attrs[] = { &dev_attr_broadcast_mode.attr, &dev_attr_canonical_macaddr.attr, &dev_attr_checksumming.attr, - &dev_attr_large_send.attr, NULL, }; diff --git a/trunk/include/linux/if_ether.h b/trunk/include/linux/if_ether.h index 005e1525ab86..580b6004d00e 100644 --- a/trunk/include/linux/if_ether.h +++ b/trunk/include/linux/if_ether.h @@ -136,6 +136,10 @@ extern struct ctl_table ether_table[]; extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); +/* + * Display a 6 byte device address (MAC) in a readable format. + */ +extern char *print_mac(char *buf, const unsigned char *addr) __deprecated; #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" #define MAC_BUF_SIZE 18 #define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] diff --git a/trunk/include/linux/isdn_ppp.h b/trunk/include/linux/isdn_ppp.h index 8687a7dc0632..4c218ee7587a 100644 --- a/trunk/include/linux/isdn_ppp.h +++ b/trunk/include/linux/isdn_ppp.h @@ -157,7 +157,7 @@ typedef struct { typedef struct { int mp_mrru; /* unused */ - struct sk_buff * frags; /* fragments sl list -- use skb->next */ + struct sk_buff_head frags; /* fragments sl list */ long frames; /* number of frames in the frame list */ unsigned int seq; /* last processed packet seq #: any packets * with smaller seq # will be dropped diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h index 97873e31661c..083b5989cecb 100644 --- a/trunk/include/linux/netdevice.h +++ b/trunk/include/linux/netdevice.h @@ -63,69 +63,30 @@ struct wireless_dev; #define HAVE_FREE_NETDEV /* free_netdev() */ #define HAVE_NETDEV_PRIV /* netdev_priv() */ -/* Backlog congestion levels */ -#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ -#define NET_RX_DROP 1 /* packet dropped */ - -/* - * Transmit return codes: transmit return codes originate from three different - * namespaces: - * - * - qdisc return codes - * - driver transmit return codes - * - errno values - * - * Drivers are allowed to return any one of those in their hard_start_xmit() - * function. Real network devices commonly used with qdiscs should only return - * the driver transmit return codes though - when qdiscs are used, the actual - * transmission happens asynchronously, so the value is not propagated to - * higher layers. Virtual network devices transmit synchronously, in this case - * the driver transmit return codes are consumed by dev_queue_xmit(), all - * others are propagated to higher layers. - */ +#define NET_XMIT_SUCCESS 0 +#define NET_XMIT_DROP 1 /* skb dropped */ +#define NET_XMIT_CN 2 /* congestion notification */ +#define NET_XMIT_POLICED 3 /* skb is shot by police */ +#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */ -/* qdisc ->enqueue() return codes. 
*/ -#define NET_XMIT_SUCCESS 0x00 -#define NET_XMIT_DROP 0x01 /* skb dropped */ -#define NET_XMIT_CN 0x02 /* congestion notification */ -#define NET_XMIT_POLICED 0x03 /* skb is shot by police */ -#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ +/* Backlog congestion levels */ +#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ +#define NET_RX_DROP 1 /* packet dropped */ /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It * indicates that the device will soon be dropping packets, or already drops * some packets of the same priority; prompting us to send less aggressively. */ -#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) +#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e)) #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) /* Driver transmit return codes */ -#define NETDEV_TX_MASK 0xf0 - enum netdev_tx { - __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ - NETDEV_TX_OK = 0x00, /* driver took care of packet */ - NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ - NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ + NETDEV_TX_OK = 0, /* driver took care of packet */ + NETDEV_TX_BUSY, /* driver tx path was busy*/ + NETDEV_TX_LOCKED = -1, /* driver tx lock was already taken */ }; typedef enum netdev_tx netdev_tx_t; -/* - * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; - * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. - */ -static inline bool dev_xmit_complete(int rc) -{ - /* - * Positive cases with an skb consumed by a driver: - * - successful transmission (rc == NETDEV_TX_OK) - * - error while transmitting (rc < 0) - * - error while queueing to a different device (rc & NET_XMIT_MASK) - */ - if (likely(rc < NET_XMIT_MASK)) - return true; - - return false; -} - #endif #define MAX_ADDR_LEN 32 /* Largest hardware address length */ @@ -896,7 +857,7 @@ struct net_device { /* device index hash chain */ struct hlist_node index_hlist; - struct list_head link_watch_list; + struct net_device *link_watch_next; /* register/unregister state machine */ enum { NETREG_UNINITIALIZED=0, @@ -1132,16 +1093,6 @@ static inline struct net_device *next_net_device(struct net_device *dev) return lh == &net->dev_base_head ? NULL : net_device_entry(lh); } -static inline struct net_device *next_net_device_rcu(struct net_device *dev) -{ - struct list_head *lh; - struct net *net; - - net = dev_net(dev); - lh = rcu_dereference(dev->dev_list.next); - return lh == &net->dev_base_head ? NULL : net_device_entry(lh); -} - static inline struct net_device *first_net_device(struct net *net) { return list_empty(&net->dev_base_head) ? 
NULL : @@ -1600,7 +1551,6 @@ static inline void dev_hold(struct net_device *dev) */ extern void linkwatch_fire_event(struct net_device *dev); -extern void linkwatch_forget_dev(struct net_device *dev); /** * netif_carrier_ok - test if carrier present @@ -1942,7 +1892,6 @@ extern void netdev_features_change(struct net_device *dev); extern void dev_load(struct net *net, const char *name); extern void dev_mcast_init(void); extern const struct net_device_stats *dev_get_stats(struct net_device *dev); -extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats); extern int netdev_max_backlog; extern int weight_p; diff --git a/trunk/include/linux/notifier.h b/trunk/include/linux/notifier.h index b0c3671d463c..29714b8441b1 100644 --- a/trunk/include/linux/notifier.h +++ b/trunk/include/linux/notifier.h @@ -202,7 +202,6 @@ static inline int notifier_to_errno(int ret) #define NETDEV_BONDING_OLDTYPE 0x000E #define NETDEV_BONDING_NEWTYPE 0x000F #define NETDEV_POST_INIT 0x0010 -#define NETDEV_UNREGISTER_PERNET 0x0011 #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN diff --git a/trunk/include/linux/tcp.h b/trunk/include/linux/tcp.h index 32d7d77b4a01..eeecb8547a2a 100644 --- a/trunk/include/linux/tcp.h +++ b/trunk/include/linux/tcp.h @@ -81,12 +81,6 @@ enum { TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) }; -/* - * TCP general constants - */ -#define TCP_MSS_DEFAULT 536U /* IPv4 (RFC1122, RFC2581) */ -#define TCP_MSS_DESIRED 1220U /* IPv6 (tunneled), EDNS0 (RFC3226) */ - /* TCP socket options */ #define TCP_NODELAY 1 /* Turn off Nagle's algorithm. */ #define TCP_MAXSEG 2 /* Limit MSS */ diff --git a/trunk/include/net/inet_hashtables.h b/trunk/include/net/inet_hashtables.h index 41cbddd25b70..5b698b3b463d 100644 --- a/trunk/include/net/inet_hashtables.h +++ b/trunk/include/net/inet_hashtables.h @@ -92,8 +92,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib) return read_pnet(&ib->ib_net); } -#define inet_bind_bucket_for_each(tb, pos, head) \ - hlist_for_each_entry(tb, pos, head, node) +#define inet_bind_bucket_for_each(tb, node, head) \ + hlist_for_each_entry(tb, node, head, node) struct inet_bind_hashbucket { spinlock_t lock; diff --git a/trunk/include/net/inetpeer.h b/trunk/include/net/inetpeer.h index 87b1df0d4d8c..35ad7b930467 100644 --- a/trunk/include/net/inetpeer.h +++ b/trunk/include/net/inetpeer.h @@ -17,15 +17,15 @@ struct inet_peer { /* group together avl_left,avl_right,v4daddr to speedup lookups */ struct inet_peer *avl_left, *avl_right; __be32 v4daddr; /* peer's address */ - __u32 avl_height; + __u16 avl_height; + __u16 ip_id_count; /* IP ID for the next packet */ struct list_head unused; __u32 dtime; /* the time of last use of not * referenced entries */ atomic_t refcnt; atomic_t rid; /* Frag reception counter */ - atomic_t ip_id_count; /* IP ID for the next packet */ __u32 tcp_ts; - __u32 tcp_ts_stamp; + unsigned long tcp_ts_stamp; }; void inet_initpeers(void) __init; @@ -36,11 +36,17 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create); /* can be called from BH context or outside */ extern void inet_putpeer(struct inet_peer *p); +extern spinlock_t inet_peer_idlock; /* can be called with or without local BH being disabled */ static inline __u16 inet_getid(struct inet_peer *p, int more) { - more++; - return atomic_add_return(more, &p->ip_id_count) - more; + __u16 id; + + spin_lock_bh(&inet_peer_idlock); + id = p->ip_id_count; + p->ip_id_count += 1 + more; + spin_unlock_bh(&inet_peer_idlock); + return id; } 
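The two inet_getid() bodies in the hunk above differ only in how they serialize the per-peer IP ID counter: the variant being removed reserves (1 + more) IDs with a single atomic_add_return() on an atomic_t, while the variant being restored guards a plain __u16 with the global inet_peer_idlock spinlock. The short userspace sketch below mirrors that trade-off; the peer structs, the lock, and the function names are illustrative stand-ins rather than the kernel's own types:

/*
 * Userspace analogue of the two inet_getid() strategies shown above:
 * a lock-free fetch-and-add versus a plain counter behind one global
 * spinlock. All names here are illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct peer_atomic { atomic_uint ip_id_count; };  /* lock-free counter */
struct peer_locked { uint16_t ip_id_count; };     /* needs external locking */

static pthread_spinlock_t peer_idlock;            /* stands in for inet_peer_idlock */

/* Lock-free variant: reserve (1 + more) IDs, return the first of them. */
static uint16_t getid_atomic(struct peer_atomic *p, int more)
{
	/* atomic_fetch_add() returns the previous value, i.e. the first reserved ID */
	return (uint16_t)atomic_fetch_add(&p->ip_id_count, (unsigned)(1 + more));
}

/* Locked variant: same semantics, serialized by one global spinlock. */
static uint16_t getid_locked(struct peer_locked *p, int more)
{
	uint16_t id;

	pthread_spin_lock(&peer_idlock);
	id = p->ip_id_count;
	p->ip_id_count += 1 + more;
	pthread_spin_unlock(&peer_idlock);
	return id;
}

int main(void)
{
	struct peer_atomic a = { 0 };
	struct peer_locked l = { 0 };

	pthread_spin_init(&peer_idlock, PTHREAD_PROCESS_PRIVATE);
	/* both calls hand out ID 0 and advance their counter by 1 */
	printf("%u %u\n", (unsigned)getid_atomic(&a, 0), (unsigned)getid_locked(&l, 0));
	pthread_spin_destroy(&peer_idlock);
	return 0;
}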
#endif /* _NET_INETPEER_H */ diff --git a/trunk/include/net/phonet/pn_dev.h b/trunk/include/net/phonet/pn_dev.h index d7b989ca3d63..afa7defceb14 100644 --- a/trunk/include/net/phonet/pn_dev.h +++ b/trunk/include/net/phonet/pn_dev.h @@ -25,7 +25,7 @@ struct phonet_device_list { struct list_head list; - struct mutex lock; + spinlock_t lock; }; struct phonet_device_list *phonet_device_list(struct net *net); diff --git a/trunk/include/net/sctp/structs.h b/trunk/include/net/sctp/structs.h index cd2e18778f81..6e5f0e0c7967 100644 --- a/trunk/include/net/sctp/structs.h +++ b/trunk/include/net/sctp/structs.h @@ -1980,7 +1980,7 @@ void sctp_assoc_set_primary(struct sctp_association *, void sctp_assoc_del_nonprimary_peers(struct sctp_association *, struct sctp_transport *); int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, - sctp_scope_t, gfp_t); + gfp_t); int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, struct sctp_cookie*, gfp_t gfp); diff --git a/trunk/include/net/tcp.h b/trunk/include/net/tcp.h index 325bfcf5c934..bf20f88fd033 100644 --- a/trunk/include/net/tcp.h +++ b/trunk/include/net/tcp.h @@ -62,6 +62,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); /* Minimal accepted MSS. It is (60+60+8) - (20+20). */ #define TCP_MIN_MSS 88U +/* Minimal RCV_MSS. */ +#define TCP_MIN_RCVMSS 536U + /* The least MTU to use for probing */ #define TCP_BASE_MSS 512 diff --git a/trunk/kernel/time/clocksource.c b/trunk/kernel/time/clocksource.c index 4a310906b3e8..5e18c6ab2c6a 100644 --- a/trunk/kernel/time/clocksource.c +++ b/trunk/kernel/time/clocksource.c @@ -39,7 +39,7 @@ void timecounter_init(struct timecounter *tc, tc->cycle_last = cc->read(cc); tc->nsec = start_tstamp; } -EXPORT_SYMBOL_GPL(timecounter_init); +EXPORT_SYMBOL(timecounter_init); /** * timecounter_read_delta - get nanoseconds since last call of this function @@ -83,7 +83,7 @@ u64 timecounter_read(struct timecounter *tc) return nsec; } -EXPORT_SYMBOL_GPL(timecounter_read); +EXPORT_SYMBOL(timecounter_read); u64 timecounter_cyc2time(struct timecounter *tc, cycle_t cycle_tstamp) @@ -105,7 +105,7 @@ u64 timecounter_cyc2time(struct timecounter *tc, return nsec; } -EXPORT_SYMBOL_GPL(timecounter_cyc2time); +EXPORT_SYMBOL(timecounter_cyc2time); /*[Clocksource internal variables]--------- * curr_clocksource: diff --git a/trunk/kernel/time/timecompare.c b/trunk/kernel/time/timecompare.c index 96ff643a5a59..71e7f1a19156 100644 --- a/trunk/kernel/time/timecompare.c +++ b/trunk/kernel/time/timecompare.c @@ -40,7 +40,7 @@ ktime_t timecompare_transform(struct timecompare *sync, return ns_to_ktime(nsec); } -EXPORT_SYMBOL_GPL(timecompare_transform); +EXPORT_SYMBOL(timecompare_transform); int timecompare_offset(struct timecompare *sync, s64 *offset, @@ -131,7 +131,7 @@ int timecompare_offset(struct timecompare *sync, return used; } -EXPORT_SYMBOL_GPL(timecompare_offset); +EXPORT_SYMBOL(timecompare_offset); void __timecompare_update(struct timecompare *sync, u64 source_tstamp) @@ -188,4 +188,4 @@ void __timecompare_update(struct timecompare *sync, } } } -EXPORT_SYMBOL_GPL(__timecompare_update); +EXPORT_SYMBOL(__timecompare_update); diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c index d9cb020029b9..39f8d0120104 100644 --- a/trunk/net/8021q/vlan.c +++ b/trunk/net/8021q/vlan.c @@ -41,7 +41,7 @@ /* Global VLAN variables */ -int vlan_net_id __read_mostly; +int vlan_net_id; /* Our listing of VLAN group(s) */ static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE]; diff --git a/trunk/net/8021q/vlan.h 
b/trunk/net/8021q/vlan.h index 5685296017e9..68f9290e6837 100644 --- a/trunk/net/8021q/vlan.h +++ b/trunk/net/8021q/vlan.h @@ -16,21 +16,6 @@ struct vlan_priority_tci_mapping { struct vlan_priority_tci_mapping *next; }; - -/** - * struct vlan_rx_stats - VLAN percpu rx stats - * @rx_packets: number of received packets - * @rx_bytes: number of received bytes - * @multicast: number of received multicast packets - * @rx_errors: number of errors - */ -struct vlan_rx_stats { - unsigned long rx_packets; - unsigned long rx_bytes; - unsigned long multicast; - unsigned long rx_errors; -}; - /** * struct vlan_dev_info - VLAN private device data * @nr_ingress_mappings: number of ingress priority mappings @@ -44,7 +29,6 @@ struct vlan_rx_stats { * @dent: proc dir entry * @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX * @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX - * @vlan_rx_stats: ptr to percpu rx stats */ struct vlan_dev_info { unsigned int nr_ingress_mappings; @@ -61,7 +45,6 @@ struct vlan_dev_info { struct proc_dir_entry *dent; unsigned long cnt_inc_headroom_on_tx; unsigned long cnt_encap_on_xmit; - struct vlan_rx_stats *vlan_rx_stats; }; static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) diff --git a/trunk/net/8021q/vlan_core.c b/trunk/net/8021q/vlan_core.c index e75a2f3b10af..8d5ca2ac4f8d 100644 --- a/trunk/net/8021q/vlan_core.c +++ b/trunk/net/8021q/vlan_core.c @@ -14,7 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, if (skb_bond_should_drop(skb)) goto drop; - __vlan_hwaccel_put_tag(skb, vlan_tci); + skb->vlan_tci = vlan_tci; skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); if (!skb->dev) @@ -31,7 +31,7 @@ EXPORT_SYMBOL(__vlan_hwaccel_rx); int vlan_hwaccel_do_receive(struct sk_buff *skb) { struct net_device *dev = skb->dev; - struct vlan_rx_stats *rx_stats; + struct net_device_stats *stats; skb->dev = vlan_dev_info(dev)->real_dev; netif_nit_deliver(skb); @@ -40,17 +40,15 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb) skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); skb->vlan_tci = 0; - rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, - smp_processor_id()); - - rx_stats->rx_packets++; - rx_stats->rx_bytes += skb->len; + stats = &dev->stats; + stats->rx_packets++; + stats->rx_bytes += skb->len; switch (skb->pkt_type) { case PACKET_BROADCAST: break; case PACKET_MULTICAST: - rx_stats->multicast++; + stats->multicast++; break; case PACKET_OTHERHOST: /* Our lower layer thinks this is not local, let's make sure. 
@@ -85,7 +83,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, if (skb_bond_should_drop(skb)) goto drop; - __vlan_hwaccel_put_tag(skb, vlan_tci); + skb->vlan_tci = vlan_tci; skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); if (!skb->dev) diff --git a/trunk/net/8021q/vlan_dev.c b/trunk/net/8021q/vlan_dev.c index de0dc6bacbe8..790fd55ec318 100644 --- a/trunk/net/8021q/vlan_dev.c +++ b/trunk/net/8021q/vlan_dev.c @@ -140,7 +140,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { struct vlan_hdr *vhdr; - struct vlan_rx_stats *rx_stats; + struct net_device_stats *stats; u16 vlan_id; u16 vlan_tci; @@ -163,10 +163,9 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, goto err_unlock; } - rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, - smp_processor_id()); - rx_stats->rx_packets++; - rx_stats->rx_bytes += skb->len; + stats = &skb->dev->stats; + stats->rx_packets++; + stats->rx_bytes += skb->len; skb_pull_rcsum(skb, VLAN_HLEN); @@ -181,7 +180,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, break; case PACKET_MULTICAST: - rx_stats->multicast++; + stats->multicast++; break; case PACKET_OTHERHOST: @@ -201,7 +200,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, skb = vlan_check_reorder_header(skb); if (!skb) { - rx_stats->rx_errors++; + stats->rx_errors++; goto err_unlock; } @@ -333,7 +332,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, } else txq->tx_dropped++; - return ret; + return NETDEV_TX_OK; } static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, @@ -359,7 +358,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, } else txq->tx_dropped++; - return ret; + return NETDEV_TX_OK; } static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) @@ -732,11 +731,6 @@ static int vlan_dev_init(struct net_device *dev) subclass = 1; vlan_dev_set_lockdep_class(dev, subclass); - - vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats); - if (!vlan_dev_info(dev)->vlan_rx_stats) - return -ENOMEM; - return 0; } @@ -746,8 +740,6 @@ static void vlan_dev_uninit(struct net_device *dev) struct vlan_dev_info *vlan = vlan_dev_info(dev); int i; - free_percpu(vlan->vlan_rx_stats); - vlan->vlan_rx_stats = NULL; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { while ((pm = vlan->egress_priority_map[i]) != NULL) { vlan->egress_priority_map[i] = pm->next; @@ -783,31 +775,6 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev) return dev_ethtool_get_flags(vlan->real_dev); } -static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev) -{ - struct net_device_stats *stats = &dev->stats; - - dev_txq_stats_fold(dev, stats); - - if (vlan_dev_info(dev)->vlan_rx_stats) { - struct vlan_rx_stats *p, rx = {0}; - int i; - - for_each_possible_cpu(i) { - p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i); - rx.rx_packets += p->rx_packets; - rx.rx_bytes += p->rx_bytes; - rx.rx_errors += p->rx_errors; - rx.multicast += p->multicast; - } - stats->rx_packets = rx.rx_packets; - stats->rx_bytes = rx.rx_bytes; - stats->rx_errors = rx.rx_errors; - stats->multicast = rx.multicast; - } - return stats; -} - static const struct ethtool_ops vlan_ethtool_ops = { .get_settings = vlan_ethtool_get_settings, .get_drvinfo = vlan_ethtool_get_drvinfo, @@ -830,7 +797,6 @@ static const struct net_device_ops vlan_netdev_ops = { .ndo_change_rx_flags = 
vlan_dev_change_rx_flags, .ndo_do_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, - .ndo_get_stats = vlan_dev_get_stats, #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, @@ -854,7 +820,6 @@ static const struct net_device_ops vlan_netdev_accel_ops = { .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_do_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, - .ndo_get_stats = vlan_dev_get_stats, #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, diff --git a/trunk/net/atm/ioctl.c b/trunk/net/atm/ioctl.c index 2ea40995dced..4da8892ced5f 100644 --- a/trunk/net/atm/ioctl.c +++ b/trunk/net/atm/ioctl.c @@ -191,181 +191,8 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) } #ifdef CONFIG_COMPAT -/* - * FIXME: - * The compat_ioctl handling is duplicated, using both these conversion - * routines and the compat argument to the actual handlers. Both - * versions are somewhat incomplete and should be merged, e.g. by - * moving the ioctl number translation into the actual handlers and - * killing the conversion code. - * - * -arnd, November 2009 - */ -#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct compat_atmif_sioc) -#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct compat_atm_iobuf) -#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct compat_atmif_sioc) -#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct compat_atmif_sioc) -#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct compat_atmif_sioc) -#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct compat_atmif_sioc) -#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct compat_atmif_sioc) -#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct compat_atmif_sioc) -#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct compat_atmif_sioc) -#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct compat_atmif_sioc) -#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct compat_atmif_sioc) -#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct compat_atmif_sioc) -#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct compat_atmif_sioc) -#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct compat_atmif_sioc) -#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct compat_atmif_sioc) -#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct compat_atmif_sioc) -#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct compat_atmif_sioc) - -static struct { - unsigned int cmd32; - unsigned int cmd; -} atm_ioctl_map[] = { - { ATM_GETLINKRATE32, ATM_GETLINKRATE }, - { ATM_GETNAMES32, ATM_GETNAMES }, - { ATM_GETTYPE32, ATM_GETTYPE }, - { ATM_GETESI32, ATM_GETESI }, - { ATM_GETADDR32, ATM_GETADDR }, - { ATM_RSTADDR32, ATM_RSTADDR }, - { ATM_ADDADDR32, ATM_ADDADDR }, - { ATM_DELADDR32, ATM_DELADDR }, - { ATM_GETCIRANGE32, ATM_GETCIRANGE }, - { ATM_SETCIRANGE32, ATM_SETCIRANGE }, - { ATM_SETESI32, ATM_SETESI }, - { ATM_SETESIF32, ATM_SETESIF }, - { ATM_GETSTAT32, ATM_GETSTAT }, - { ATM_GETSTATZ32, ATM_GETSTATZ }, - { ATM_GETLOOP32, ATM_GETLOOP }, - { ATM_SETLOOP32, ATM_SETLOOP }, - { ATM_QUERYLOOP32, ATM_QUERYLOOP }, -}; - -#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map) - -static int do_atm_iobuf(struct socket *sock, unsigned int cmd, - unsigned long arg) -{ - struct atm_iobuf __user *iobuf; - struct compat_atm_iobuf __user *iobuf32; - u32 data; - void __user *datap; - int len, err; - - iobuf = 
compat_alloc_user_space(sizeof(*iobuf)); - iobuf32 = compat_ptr(arg); - - if (get_user(len, &iobuf32->length) || - get_user(data, &iobuf32->buffer)) - return -EFAULT; - datap = compat_ptr(data); - if (put_user(len, &iobuf->length) || - put_user(datap, &iobuf->buffer)) - return -EFAULT; - - err = do_vcc_ioctl(sock, cmd, (unsigned long) iobuf, 0); - - if (!err) { - if (copy_in_user(&iobuf32->length, &iobuf->length, - sizeof(int))) - err = -EFAULT; - } - - return err; -} - -static int do_atmif_sioc(struct socket *sock, unsigned int cmd, - unsigned long arg) -{ - struct atmif_sioc __user *sioc; - struct compat_atmif_sioc __user *sioc32; - u32 data; - void __user *datap; - int err; - - sioc = compat_alloc_user_space(sizeof(*sioc)); - sioc32 = compat_ptr(arg); - - if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) - || get_user(data, &sioc32->arg)) - return -EFAULT; - datap = compat_ptr(data); - if (put_user(datap, &sioc->arg)) - return -EFAULT; - - err = do_vcc_ioctl(sock, cmd, (unsigned long) sioc, 0); - - if (!err) { - if (copy_in_user(&sioc32->length, &sioc->length, - sizeof(int))) - err = -EFAULT; - } - return err; -} - -static int do_atm_ioctl(struct socket *sock, unsigned int cmd32, - unsigned long arg) -{ - int i; - unsigned int cmd = 0; - - switch (cmd32) { - case SONET_GETSTAT: - case SONET_GETSTATZ: - case SONET_GETDIAG: - case SONET_SETDIAG: - case SONET_CLRDIAG: - case SONET_SETFRAMING: - case SONET_GETFRAMING: - case SONET_GETFRSENSE: - return do_atmif_sioc(sock, cmd32, arg); - } - - for (i = 0; i < NR_ATM_IOCTL; i++) { - if (cmd32 == atm_ioctl_map[i].cmd32) { - cmd = atm_ioctl_map[i].cmd; - break; - } - } - if (i == NR_ATM_IOCTL) - return -EINVAL; - - switch (cmd) { - case ATM_GETNAMES: - return do_atm_iobuf(sock, cmd, arg); - - case ATM_GETLINKRATE: - case ATM_GETTYPE: - case ATM_GETESI: - case ATM_GETADDR: - case ATM_RSTADDR: - case ATM_ADDADDR: - case ATM_DELADDR: - case ATM_GETCIRANGE: - case ATM_SETCIRANGE: - case ATM_SETESI: - case ATM_SETESIF: - case ATM_GETSTAT: - case ATM_GETSTATZ: - case ATM_GETLOOP: - case ATM_SETLOOP: - case ATM_QUERYLOOP: - return do_atmif_sioc(sock, cmd, arg); - } - - return -EINVAL; -} - -int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg) +int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { - int ret; - - ret = do_vcc_ioctl(sock, cmd, arg, 1); - if (ret != -ENOIOCTLCMD) - return ret; - - return do_atm_ioctl(sock, cmd, arg); + return do_vcc_ioctl(sock, cmd, arg, 1); } #endif diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index b7c4224f4e7d..a9750984f772 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -211,7 +211,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) conn->type = type; conn->mode = HCI_CM_ACTIVE; conn->state = BT_OPEN; - conn->auth_type = HCI_AT_GENERAL_BONDING; conn->power_save = 1; conn->disc_timeout = HCI_DISCONN_TIMEOUT; diff --git a/trunk/net/bluetooth/l2cap.c b/trunk/net/bluetooth/l2cap.c index 80d929842f04..ff0233df6246 100644 --- a/trunk/net/bluetooth/l2cap.c +++ b/trunk/net/bluetooth/l2cap.c @@ -2206,7 +2206,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data) { struct l2cap_pinfo *pi = l2cap_pi(sk); struct l2cap_conf_req *req = data; - struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM }; void *ptr = req->data; BT_DBG("sk %p", sk); @@ -2395,10 +2395,6 @@ static int 
l2cap_parse_conf_req(struct sock *sk, void *data) rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; pi->conf_state |= L2CAP_CONF_MODE_DONE; - - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); - break; case L2CAP_MODE_STREAMING: @@ -2406,10 +2402,6 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) pi->max_pdu_size = rfc.max_pdu_size; pi->conf_state |= L2CAP_CONF_MODE_DONE; - - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, - sizeof(rfc), (unsigned long) &rfc); - break; default: @@ -2419,6 +2411,9 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) rfc.mode = pi->mode; } + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + if (result == L2CAP_CONF_SUCCESS) pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; } diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index 9977288583b8..bf629ac08b87 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -79,7 +79,6 @@ #include #include #include -#include #include #include #include @@ -197,7 +196,7 @@ EXPORT_SYMBOL(dev_base_lock); static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) { unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); - return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; + return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)]; } static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) @@ -893,8 +892,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf) free_page((unsigned long) inuse); } - if (buf != name) - snprintf(buf, IFNAMSIZ, name, i); + snprintf(buf, IFNAMSIZ, name, i); if (!__dev_get_by_name(net, buf)) return i; @@ -934,21 +932,6 @@ int dev_alloc_name(struct net_device *dev, const char *name) } EXPORT_SYMBOL(dev_alloc_name); -static int dev_get_valid_name(struct net *net, const char *name, char *buf, - bool fmt) -{ - if (!dev_valid_name(name)) - return -EINVAL; - - if (fmt && strchr(name, '%')) - return __dev_alloc_name(net, name, buf); - else if (__dev_get_by_name(net, name)) - return -EEXIST; - else if (buf != name) - strlcpy(buf, name, IFNAMSIZ); - - return 0; -} /** * dev_change_name - change name of a device @@ -972,14 +955,22 @@ int dev_change_name(struct net_device *dev, const char *newname) if (dev->flags & IFF_UP) return -EBUSY; + if (!dev_valid_name(newname)) + return -EINVAL; + if (strncmp(newname, dev->name, IFNAMSIZ) == 0) return 0; memcpy(oldname, dev->name, IFNAMSIZ); - err = dev_get_valid_name(net, newname, dev->name, 1); - if (err < 0) - return err; + if (strchr(newname, '%')) { + err = dev_alloc_name(dev, newname); + if (err < 0) + return err; + } else if (__dev_get_by_name(net, newname)) + return -EEXIST; + else + strlcpy(dev->name, newname, IFNAMSIZ); rollback: /* For now only devices in the initial network namespace @@ -1007,15 +998,14 @@ int dev_change_name(struct net_device *dev, const char *newname) ret = notifier_to_errno(ret); if (ret) { - /* err >= 0 after dev_alloc_name() or stores the first errno */ - if (err >= 0) { - err = ret; - memcpy(dev->name, oldname, IFNAMSIZ); - goto rollback; - } else { + if (err) { printk(KERN_ERR "%s: name change rollback failed: %d.\n", dev->name, ret); + } else { + err = ret; + memcpy(dev->name, oldname, IFNAMSIZ); + goto rollback; } } @@ -1352,7 +1342,6 @@ int register_netdevice_notifier(struct notifier_block *nb) nb->notifier_call(nb, NETDEV_DOWN, dev); } nb->notifier_call(nb, NETDEV_UNREGISTER, dev); - nb->notifier_call(nb, NETDEV_UNREGISTER_PERNET, dev); } } @@ -1767,7 +1756,7 @@ 
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { const struct net_device_ops *ops = dev->netdev_ops; - int rc = NETDEV_TX_OK; + int rc; if (likely(!skb->next)) { if (!list_empty(&ptype_all)) @@ -1815,8 +1804,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, nskb->next = NULL; rc = ops->ndo_start_xmit(nskb, dev); if (unlikely(rc != NETDEV_TX_OK)) { - if (rc & ~NETDEV_TX_MASK) - goto out_kfree_gso_skb; nskb->next = skb->next; skb->next = nskb; return rc; @@ -1826,12 +1813,11 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_BUSY; } while (skb->next); -out_kfree_gso_skb: - if (likely(skb->next == NULL)) - skb->destructor = DEV_GSO_CB(skb)->destructor; + skb->destructor = DEV_GSO_CB(skb)->destructor; + out_kfree_skb: kfree_skb(skb); - return rc; + return NETDEV_TX_OK; } static u32 skb_tx_hashrnd; @@ -1858,20 +1844,6 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) } EXPORT_SYMBOL(skb_tx_hash); -static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) -{ - if (unlikely(queue_index >= dev->real_num_tx_queues)) { - if (net_ratelimit()) { - WARN(1, "%s selects TX queue %d, but " - "real number of TX queues is %d\n", - dev->name, queue_index, - dev->real_num_tx_queues); - } - return 0; - } - return queue_index; -} - static struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) { @@ -1885,7 +1857,6 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, if (ops->ndo_select_queue) { queue_index = ops->ndo_select_queue(dev, skb); - queue_index = dev_cap_txqueue(dev, queue_index); } else { queue_index = 0; if (dev->real_num_tx_queues > 1) @@ -2031,8 +2002,8 @@ int dev_queue_xmit(struct sk_buff *skb) HARD_TX_LOCK(dev, txq, cpu); if (!netif_tx_queue_stopped(txq)) { - rc = dev_hard_start_xmit(skb, dev, txq); - if (dev_xmit_complete(rc)) { + rc = NET_XMIT_SUCCESS; + if (!dev_hard_start_xmit(skb, dev, txq)) { HARD_TX_UNLOCK(dev, txq); goto out; } @@ -4730,8 +4701,7 @@ static void net_set_todo(struct net_device *dev) static void rollback_registered_many(struct list_head *head) { - struct net_device *dev, *aux, *fdev; - LIST_HEAD(pernet_list); + struct net_device *dev; BUG_ON(dev_boot_phase); ASSERT_RTNL(); @@ -4789,24 +4759,8 @@ static void rollback_registered_many(struct list_head *head) synchronize_net(); - list_for_each_entry_safe(dev, aux, head, unreg_list) { - int new_net = 1; - list_for_each_entry(fdev, &pernet_list, unreg_list) { - if (dev_net(dev) == dev_net(fdev)) { - new_net = 0; - dev_put(dev); - break; - } - } - if (new_net) - list_move(&dev->unreg_list, &pernet_list); - } - - list_for_each_entry_safe(dev, aux, &pernet_list, unreg_list) { - call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev); - list_move(&dev->unreg_list, head); + list_for_each_entry(dev, head, unreg_list) dev_put(dev); - } } static void rollback_registered(struct net_device *dev) @@ -4891,6 +4845,8 @@ EXPORT_SYMBOL(netdev_fix_features); int register_netdevice(struct net_device *dev) { + struct hlist_head *head; + struct hlist_node *p; int ret; struct net *net = dev_net(dev); @@ -4919,14 +4875,26 @@ int register_netdevice(struct net_device *dev) } } - ret = dev_get_valid_name(net, dev->name, dev->name, 0); - if (ret) + if (!dev_valid_name(dev->name)) { + ret = -EINVAL; goto err_uninit; + } dev->ifindex = dev_new_index(net); if (dev->iflink == -1) dev->iflink = dev->ifindex; + /* Check for existence of name */ + head = dev_name_hash(net, 
dev->name); + hlist_for_each(p, head) { + struct net_device *d + = hlist_entry(p, struct net_device, name_hlist); + if (!strncmp(d->name, dev->name, IFNAMSIZ)) { + ret = -EEXIST; + goto err_uninit; + } + } + /* Fix illegal checksum combinations */ if ((dev->features & NETIF_F_HW_CSUM) && (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { @@ -5079,8 +5047,6 @@ static void netdev_wait_allrefs(struct net_device *dev) { unsigned long rebroadcast_time, warning_time; - linkwatch_forget_dev(dev); - rebroadcast_time = warning_time = jiffies; while (atomic_read(&dev->refcnt) != 0) { if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { @@ -5088,8 +5054,6 @@ static void netdev_wait_allrefs(struct net_device *dev) /* Rebroadcast unregister notification */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - /* don't resend NETDEV_UNREGISTER_PERNET, _PERNET users - * should have already handle it the first time */ if (test_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { @@ -5184,32 +5148,6 @@ void netdev_run_todo(void) } } -/** - * dev_txq_stats_fold - fold tx_queues stats - * @dev: device to get statistics from - * @stats: struct net_device_stats to hold results - */ -void dev_txq_stats_fold(const struct net_device *dev, - struct net_device_stats *stats) -{ - unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; - unsigned int i; - struct netdev_queue *txq; - - for (i = 0; i < dev->num_tx_queues; i++) { - txq = netdev_get_tx_queue(dev, i); - tx_bytes += txq->tx_bytes; - tx_packets += txq->tx_packets; - tx_dropped += txq->tx_dropped; - } - if (tx_bytes || tx_packets || tx_dropped) { - stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_packets; - stats->tx_dropped = tx_dropped; - } -} -EXPORT_SYMBOL(dev_txq_stats_fold); - /** * dev_get_stats - get network device statistics * @dev: device to get statistics from @@ -5224,9 +5162,25 @@ const struct net_device_stats *dev_get_stats(struct net_device *dev) if (ops->ndo_get_stats) return ops->ndo_get_stats(dev); - - dev_txq_stats_fold(dev, &dev->stats); - return &dev->stats; + else { + unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; + struct net_device_stats *stats = &dev->stats; + unsigned int i; + struct netdev_queue *txq; + + for (i = 0; i < dev->num_tx_queues; i++) { + txq = netdev_get_tx_queue(dev, i); + tx_bytes += txq->tx_bytes; + tx_packets += txq->tx_packets; + tx_dropped += txq->tx_dropped; + } + if (tx_bytes || tx_packets || tx_dropped) { + stats->tx_bytes = tx_bytes; + stats->tx_packets = tx_packets; + stats->tx_dropped = tx_dropped; + } + return stats; + } } EXPORT_SYMBOL(dev_get_stats); @@ -5307,7 +5261,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); - INIT_LIST_HEAD(&dev->link_watch_list); dev->priv_flags = IFF_XMIT_DST_RELEASE; setup(dev); strcpy(dev->name, name); @@ -5402,10 +5355,6 @@ EXPORT_SYMBOL(unregister_netdevice_queue); * unregister_netdevice_many - unregister many devices * @head: list of devices * - * WARNING: Calling this modifies the given list - * (in rollback_registered_many). It may change the order of the elements - * in the list. However, you can assume it does not add or delete elements - * to/from the list. 
*/ void unregister_netdevice_many(struct list_head *head) { @@ -5454,6 +5403,8 @@ EXPORT_SYMBOL(unregister_netdev); int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) { + char buf[IFNAMSIZ]; + const char *destname; int err; ASSERT_RTNL(); @@ -5486,11 +5437,20 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char * we can use it in the destination network namespace. */ err = -EEXIST; - if (__dev_get_by_name(net, dev->name)) { + destname = dev->name; + if (__dev_get_by_name(net, destname)) { /* We get here if we can't use the current device name */ if (!pat) goto out; - if (dev_get_valid_name(net, pat, dev->name, 1)) + if (!dev_valid_name(pat)) + goto out; + if (strchr(pat, '%')) { + if (__dev_alloc_name(net, pat, buf) < 0) + goto out; + destname = buf; + } else + destname = pat; + if (__dev_get_by_name(net, destname)) goto out; } @@ -5514,7 +5474,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char this device. They should clean all the things. */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev); - call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev); /* * Flush the unicast and multicast chains @@ -5527,6 +5486,10 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* Actually switch the network namespace */ dev_net_set(dev, net); + /* Assign the new device name */ + if (destname != dev->name) + strcpy(dev->name, destname); + /* If there is an ifindex conflict assign a new one */ if (__dev_get_by_index(net, dev->ifindex)) { int iflink = (dev->iflink == dev->ifindex); diff --git a/trunk/net/core/link_watch.c b/trunk/net/core/link_watch.c index 5910b555a54a..bf8f7af699d7 100644 --- a/trunk/net/core/link_watch.c +++ b/trunk/net/core/link_watch.c @@ -35,7 +35,7 @@ static unsigned long linkwatch_nextevent; static void linkwatch_event(struct work_struct *dummy); static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); -static LIST_HEAD(lweventlist); +static struct net_device *lweventlist; static DEFINE_SPINLOCK(lweventlist_lock); static unsigned char default_operstate(const struct net_device *dev) @@ -89,10 +89,8 @@ static void linkwatch_add_event(struct net_device *dev) unsigned long flags; spin_lock_irqsave(&lweventlist_lock, flags); - if (list_empty(&dev->link_watch_list)) { - list_add_tail(&dev->link_watch_list, &lweventlist); - dev_hold(dev); - } + dev->link_watch_next = lweventlist; + lweventlist = dev; spin_unlock_irqrestore(&lweventlist_lock, flags); } @@ -135,35 +133,9 @@ static void linkwatch_schedule_work(int urgent) } -static void linkwatch_do_dev(struct net_device *dev) -{ - /* - * Make sure the above read is complete since it can be - * rewritten as soon as we clear the bit below. 
- */ - smp_mb__before_clear_bit(); - - /* We are about to handle this device, - * so new events can be accepted - */ - clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); - - rfc2863_policy(dev); - if (dev->flags & IFF_UP) { - if (netif_carrier_ok(dev)) - dev_activate(dev); - else - dev_deactivate(dev); - - netdev_state_change(dev); - } - dev_put(dev); -} - static void __linkwatch_run_queue(int urgent_only) { - struct net_device *dev; - LIST_HEAD(wrk); + struct net_device *next; /* * Limit the number of linkwatch events to one @@ -181,40 +153,46 @@ static void __linkwatch_run_queue(int urgent_only) clear_bit(LW_URGENT, &linkwatch_flags); spin_lock_irq(&lweventlist_lock); - list_splice_init(&lweventlist, &wrk); + next = lweventlist; + lweventlist = NULL; + spin_unlock_irq(&lweventlist_lock); - while (!list_empty(&wrk)) { + while (next) { + struct net_device *dev = next; - dev = list_first_entry(&wrk, struct net_device, link_watch_list); - list_del_init(&dev->link_watch_list); + next = dev->link_watch_next; if (urgent_only && !linkwatch_urgent_event(dev)) { - list_add_tail(&dev->link_watch_list, &lweventlist); + linkwatch_add_event(dev); continue; } - spin_unlock_irq(&lweventlist_lock); - linkwatch_do_dev(dev); - spin_lock_irq(&lweventlist_lock); - } - if (!list_empty(&lweventlist)) - linkwatch_schedule_work(0); - spin_unlock_irq(&lweventlist_lock); -} - -void linkwatch_forget_dev(struct net_device *dev) -{ - unsigned long flags; - int clean = 0; + /* + * Make sure the above read is complete since it can be + * rewritten as soon as we clear the bit below. + */ + smp_mb__before_clear_bit(); + + /* We are about to handle this device, + * so new events can be accepted + */ + clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); + + rfc2863_policy(dev); + if (dev->flags & IFF_UP) { + if (netif_carrier_ok(dev)) + dev_activate(dev); + else + dev_deactivate(dev); + + netdev_state_change(dev); + } - spin_lock_irqsave(&lweventlist_lock, flags); - if (!list_empty(&dev->link_watch_list)) { - list_del_init(&dev->link_watch_list); - clean = 1; + dev_put(dev); } - spin_unlock_irqrestore(&lweventlist_lock, flags); - if (clean) - linkwatch_do_dev(dev); + + if (lweventlist) + linkwatch_schedule_work(0); } @@ -238,6 +216,8 @@ void linkwatch_fire_event(struct net_device *dev) bool urgent = linkwatch_urgent_event(dev); if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { + dev_hold(dev); + linkwatch_add_event(dev); } else if (!urgent) return; diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index 941bac907484..80a96166df39 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -493,9 +493,6 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) { struct skb_shared_info *shinfo; - if (irqs_disabled()) - return 0; - if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) return 0; diff --git a/trunk/net/decnet/dn_dev.c b/trunk/net/decnet/dn_dev.c index 6c916e2b8a84..b5ef237c8a74 100644 --- a/trunk/net/decnet/dn_dev.c +++ b/trunk/net/decnet/dn_dev.c @@ -68,7 +68,7 @@ extern struct neigh_table dn_neigh_table; */ __le16 decnet_address = 0; -static DEFINE_SPINLOCK(dndev_lock); +static DEFINE_RWLOCK(dndev_lock); static struct net_device *decnet_default_device; static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); @@ -557,8 +557,7 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg) struct net_device *dn_dev_get_default(void) { struct net_device *dev; - - spin_lock(&dndev_lock); + read_lock(&dndev_lock); dev = decnet_default_device; if (dev) { if 
(dev->dn_ptr) @@ -566,8 +565,7 @@ struct net_device *dn_dev_get_default(void) else dev = NULL; } - spin_unlock(&dndev_lock); - + read_unlock(&dndev_lock); return dev; } @@ -577,15 +575,13 @@ int dn_dev_set_default(struct net_device *dev, int force) int rv = -EBUSY; if (!dev->dn_ptr) return -ENODEV; - - spin_lock(&dndev_lock); + write_lock(&dndev_lock); if (force || decnet_default_device == NULL) { old = decnet_default_device; decnet_default_device = dev; rv = 0; } - spin_unlock(&dndev_lock); - + write_unlock(&dndev_lock); if (old) dev_put(old); return rv; @@ -593,14 +589,13 @@ int dn_dev_set_default(struct net_device *dev, int force) static void dn_dev_check_default(struct net_device *dev) { - spin_lock(&dndev_lock); + write_lock(&dndev_lock); if (dev == decnet_default_device) { decnet_default_device = NULL; } else { dev = NULL; } - spin_unlock(&dndev_lock); - + write_unlock(&dndev_lock); if (dev) dev_put(dev); } @@ -833,17 +828,13 @@ static int dn_dev_get_first(struct net_device *dev, __le16 *addr) struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; struct dn_ifaddr *ifa; int rv = -ENODEV; - if (dn_db == NULL) goto out; - - rtnl_lock(); ifa = dn_db->ifa_list; if (ifa != NULL) { *addr = ifa->ifa_local; rv = 0; } - rtnl_unlock(); out: return rv; } diff --git a/trunk/net/ethernet/eth.c b/trunk/net/ethernet/eth.c index dd3db88f8f0a..5a883affecd3 100644 --- a/trunk/net/ethernet/eth.c +++ b/trunk/net/ethernet/eth.c @@ -393,3 +393,10 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len) return ((ssize_t) l); } EXPORT_SYMBOL(sysfs_format_mac); + +char *print_mac(char *buf, const unsigned char *addr) +{ + _format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN); + return buf; +} +EXPORT_SYMBOL(print_mac); diff --git a/trunk/net/ieee802154/wpan-class.c b/trunk/net/ieee802154/wpan-class.c index 268691256a6d..38bac70cca10 100644 --- a/trunk/net/ieee802154/wpan-class.c +++ b/trunk/net/ieee802154/wpan-class.c @@ -205,7 +205,7 @@ static int __init wpan_phy_class_init(void) err: return rc; } -subsys_initcall(wpan_phy_class_init); +module_init(wpan_phy_class_init); static void __exit wpan_phy_class_exit(void) { diff --git a/trunk/net/ipv4/devinet.c b/trunk/net/ipv4/devinet.c index 7620382058a0..c2045f9615da 100644 --- a/trunk/net/ipv4/devinet.c +++ b/trunk/net/ipv4/devinet.c @@ -1174,54 +1174,39 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); - int h, s_h; - int idx, s_idx; - int ip_idx, s_ip_idx; + int idx, ip_idx; struct net_device *dev; struct in_device *in_dev; struct in_ifaddr *ifa; - struct hlist_head *head; - struct hlist_node *node; + int s_ip_idx, s_idx = cb->args[0]; - s_h = cb->args[0]; - s_idx = idx = cb->args[1]; - s_ip_idx = ip_idx = cb->args[2]; - - for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { - idx = 0; - head = &net->dev_index_head[h]; - rcu_read_lock(); - hlist_for_each_entry_rcu(dev, node, head, index_hlist) { - if (idx < s_idx) - goto cont; - if (idx > s_idx) - s_ip_idx = 0; - in_dev = __in_dev_get_rcu(dev); - if (!in_dev) - goto cont; + s_ip_idx = ip_idx = cb->args[1]; + idx = 0; + for_each_netdev(net, dev) { + if (idx < s_idx) + goto cont; + if (idx > s_idx) + s_ip_idx = 0; + in_dev = __in_dev_get_rtnl(dev); + if (!in_dev) + goto cont; - for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; - ifa = ifa->ifa_next, ip_idx++) { - if (ip_idx < s_ip_idx) - continue; - if (inet_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).pid, + 
for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; + ifa = ifa->ifa_next, ip_idx++) { + if (ip_idx < s_ip_idx) + continue; + if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, - RTM_NEWADDR, NLM_F_MULTI) <= 0) { - rcu_read_unlock(); - goto done; - } - } -cont: - idx++; + RTM_NEWADDR, NLM_F_MULTI) <= 0) + goto done; } - rcu_read_unlock(); +cont: + idx++; } done: - cb->args[0] = h; - cb->args[1] = idx; - cb->args[2] = ip_idx; + cb->args[0] = idx; + cb->args[1] = ip_idx; return skb->len; } diff --git a/trunk/net/ipv4/fib_frontend.c b/trunk/net/ipv4/fib_frontend.c index 6c1e56aef1f4..816e2180bd60 100644 --- a/trunk/net/ipv4/fib_frontend.c +++ b/trunk/net/ipv4/fib_frontend.c @@ -895,11 +895,11 @@ static void nl_fib_lookup_exit(struct net *net) net->ipv4.fibnl = NULL; } -static void fib_disable_ip(struct net_device *dev, int force, int delay) +static void fib_disable_ip(struct net_device *dev, int force) { if (fib_sync_down_dev(dev, force)) fib_flush(dev_net(dev)); - rt_cache_flush(dev_net(dev), delay); + rt_cache_flush(dev_net(dev), 0); arp_ifdown(dev); } @@ -922,7 +922,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, /* Last address was deleted from this interface. Disable IP. */ - fib_disable_ip(dev, 1, 0); + fib_disable_ip(dev, 1); } else { rt_cache_flush(dev_net(dev), -1); } @@ -937,7 +937,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo struct in_device *in_dev = __in_dev_get_rtnl(dev); if (event == NETDEV_UNREGISTER) { - fib_disable_ip(dev, 2, -1); + fib_disable_ip(dev, 2); return NOTIFY_DONE; } @@ -955,11 +955,10 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo rt_cache_flush(dev_net(dev), -1); break; case NETDEV_DOWN: - fib_disable_ip(dev, 0, 0); + fib_disable_ip(dev, 0); break; case NETDEV_CHANGEMTU: case NETDEV_CHANGE: - case NETDEV_UNREGISTER_PERNET: rt_cache_flush(dev_net(dev), 0); break; } diff --git a/trunk/net/ipv4/igmp.c b/trunk/net/ipv4/igmp.c index 6110c6d6e613..bd24f6560a49 100644 --- a/trunk/net/ipv4/igmp.c +++ b/trunk/net/ipv4/igmp.c @@ -2313,8 +2313,7 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq) state->in_dev = NULL; for_each_netdev_rcu(net, state->dev) { struct in_device *in_dev; - - in_dev = __in_dev_get_rcu(state->dev); + in_dev = in_dev_get(state->dev); if (!in_dev) continue; read_lock(&in_dev->mc_list_lock); @@ -2324,6 +2323,7 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq) break; } read_unlock(&in_dev->mc_list_lock); + in_dev_put(in_dev); } return im; } @@ -2333,15 +2333,16 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); im = im->next; while (!im) { - if (likely(state->in_dev != NULL)) + if (likely(state->in_dev != NULL)) { read_unlock(&state->in_dev->mc_list_lock); - - state->dev = next_net_device_rcu(state->dev); + in_dev_put(state->in_dev); + } + state->dev = next_net_device(state->dev); if (!state->dev) { state->in_dev = NULL; break; } - state->in_dev = __in_dev_get_rcu(state->dev); + state->in_dev = in_dev_get(state->dev); if (!state->in_dev) continue; read_lock(&state->in_dev->mc_list_lock); @@ -2383,6 +2384,7 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v) struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); if (likely(state->in_dev != NULL)) { read_unlock(&state->in_dev->mc_list_lock); + in_dev_put(state->in_dev); state->in_dev = NULL; } state->dev = NULL; 
@@ -2462,7 +2464,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) state->im = NULL; for_each_netdev_rcu(net, state->dev) { struct in_device *idev; - idev = __in_dev_get_rcu(state->dev); + idev = in_dev_get(state->dev); if (unlikely(idev == NULL)) continue; read_lock(&idev->mc_list_lock); @@ -2478,6 +2480,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) spin_unlock_bh(&im->lock); } read_unlock(&idev->mc_list_lock); + in_dev_put(idev); } return psf; } @@ -2491,15 +2494,16 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l spin_unlock_bh(&state->im->lock); state->im = state->im->next; while (!state->im) { - if (likely(state->idev != NULL)) + if (likely(state->idev != NULL)) { read_unlock(&state->idev->mc_list_lock); - - state->dev = next_net_device_rcu(state->dev); + in_dev_put(state->idev); + } + state->dev = next_net_device(state->dev); if (!state->dev) { state->idev = NULL; goto out; } - state->idev = __in_dev_get_rcu(state->dev); + state->idev = in_dev_get(state->dev); if (!state->idev) continue; read_lock(&state->idev->mc_list_lock); @@ -2551,6 +2555,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v) } if (likely(state->idev != NULL)) { read_unlock(&state->idev->mc_list_lock); + in_dev_put(state->idev); state->idev = NULL; } state->dev = NULL; diff --git a/trunk/net/ipv4/inetpeer.c b/trunk/net/ipv4/inetpeer.c index 6bcfe52a9c87..b1fbe18feb5a 100644 --- a/trunk/net/ipv4/inetpeer.c +++ b/trunk/net/ipv4/inetpeer.c @@ -67,6 +67,9 @@ * ip_id_count: idlock */ +/* Exported for inet_getid inline function. */ +DEFINE_SPINLOCK(inet_peer_idlock); + static struct kmem_cache *peer_cachep __read_mostly; #define node_height(x) x->avl_height @@ -387,7 +390,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) n->v4daddr = daddr; atomic_set(&n->refcnt, 1); atomic_set(&n->rid, 0); - atomic_set(&n->ip_id_count, secure_ip_id(daddr)); + n->ip_id_count = secure_ip_id(daddr); n->tcp_ts_stamp = 0; write_lock_bh(&peer_pool_lock); diff --git a/trunk/net/ipv4/ip_gre.c b/trunk/net/ipv4/ip_gre.c index c5f6af5d0f34..a7de9e3a8f18 100644 --- a/trunk/net/ipv4/ip_gre.c +++ b/trunk/net/ipv4/ip_gre.c @@ -125,7 +125,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev); #define HASH_SIZE 16 -static int ipgre_net_id __read_mostly; +static int ipgre_net_id; struct ipgre_net { struct ip_tunnel *tunnels[4][HASH_SIZE]; diff --git a/trunk/net/ipv4/ipip.c b/trunk/net/ipv4/ipip.c index 7242ffcc44e5..c5b1f71c3cd8 100644 --- a/trunk/net/ipv4/ipip.c +++ b/trunk/net/ipv4/ipip.c @@ -119,7 +119,7 @@ #define HASH_SIZE 16 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) -static int ipip_net_id __read_mostly; +static int ipip_net_id; struct ipip_net { struct ip_tunnel *tunnels_r_l[HASH_SIZE]; struct ip_tunnel *tunnels_r[HASH_SIZE]; diff --git a/trunk/net/ipv4/ipmr.c b/trunk/net/ipv4/ipmr.c index 54596f73eff5..ef4ee45b928f 100644 --- a/trunk/net/ipv4/ipmr.c +++ b/trunk/net/ipv4/ipmr.c @@ -494,10 +494,8 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) return -EINVAL; } - if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) { - dev_put(dev); + if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) return -EADDRNOTAVAIL; - } IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; ip_rt_multicast_event(in_dev); diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index 4284ceef7945..ff258b57680b 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -2852,7 +2852,7 @@ static int 
rt_fill_info(struct net *net, error = rt->u.dst.error; expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; if (rt->peer) { - id = atomic_read(&rt->peer->ip_id_count) & 0xffff; + id = rt->peer->ip_id_count; if (rt->peer->tcp_ts_stamp) { ts = rt->peer->tcp_ts; tsage = get_seconds() - rt->peer->tcp_ts_stamp; diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c index 524f9760193b..e0cfa633680a 100644 --- a/trunk/net/ipv4/tcp.c +++ b/trunk/net/ipv4/tcp.c @@ -1183,9 +1183,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) #if TCP_DEBUG struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); - WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), - KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", - tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); + WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); #endif if (inet_csk_ack_scheduled(sk)) { @@ -1432,13 +1430,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, /* Now that we have two receive queues this * shouldn't happen. */ - if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), - KERN_INFO "recvmsg bug: copied %X " - "seq %X rcvnxt %X fl %X\n", *seq, - TCP_SKB_CB(skb)->seq, tp->rcv_nxt, - flags)) + if (before(*seq, TCP_SKB_CB(skb)->seq)) { + printk(KERN_INFO "recvmsg bug: copied %X " + "seq %X\n", *seq, TCP_SKB_CB(skb)->seq); break; - + } offset = *seq - TCP_SKB_CB(skb)->seq; if (tcp_hdr(skb)->syn) offset--; @@ -1447,9 +1443,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (tcp_hdr(skb)->fin) goto found_fin_ok; WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: " - "copied %X seq %X rcvnxt %X fl %X\n", - *seq, TCP_SKB_CB(skb)->seq, - tp->rcv_nxt, flags); + "copied %X seq %X\n", *seq, + TCP_SKB_CB(skb)->seq); } /* Well, if we have backlog, try to process it now yet. */ diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index cc306ac6eb51..be0c5bf7bfca 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) * "len" is invariant segment length, including TCP header. */ len += skb->data - skb_transport_header(skb); - if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || + if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) || /* If PSH is not set, packet should be * full sized, provided peer TCP is not badly broken. * This observation (if it is correct 8)) allows @@ -411,7 +411,7 @@ void tcp_initialize_rcv_mss(struct sock *sk) unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); hint = min(hint, tp->rcv_wnd / 2); - hint = min(hint, TCP_MSS_DEFAULT); + hint = min(hint, TCP_MIN_RCVMSS); hint = max(hint, TCP_MIN_MSS); inet_csk(sk)->icsk_ack.rcv_mss = hint; diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c index df18ce04f41e..657ae334f125 100644 --- a/trunk/net/ipv4/tcp_ipv4.c +++ b/trunk/net/ipv4/tcp_ipv4.c @@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * when trying new connection. 
*/ if (peer != NULL && - (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { + peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) { tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; tp->rx_opt.ts_recent = peer->tcp_ts; } @@ -217,7 +217,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (inet->opt) inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; - tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; + tp->rx_opt.mss_clamp = 536; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket @@ -1268,7 +1268,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; tcp_clear_options(&tmp_opt); - tmp_opt.mss_clamp = TCP_MSS_DEFAULT; + tmp_opt.mss_clamp = 536; tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; tcp_parse_options(skb, &tmp_opt, 0, dst); @@ -1308,7 +1308,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_death_row.sysctl_tw_recycle && (peer = rt_get_peer((struct rtable *)dst)) != NULL && peer->v4daddr == saddr) { - if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && + if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL && (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); @@ -1727,9 +1727,9 @@ int tcp_v4_remember_stamp(struct sock *sk) if (peer) { if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 || - ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL && - peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) { - peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp; + (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && + peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) { + peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp; peer->tcp_ts = tp->rx_opt.ts_recent; } if (release_it) @@ -1748,9 +1748,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 || - ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL && - peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) { - peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp; + (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() && + peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) { + peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp; peer->tcp_ts = tcptw->tw_ts_recent; } inet_putpeer(peer); @@ -1815,7 +1815,7 @@ static int tcp_v4_init_sock(struct sock *sk) */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd_clamp = ~0; - tp->mss_cache = TCP_MSS_DEFAULT; + tp->mss_cache = 536; tp->reordering = sysctl_tcp_reordering; icsk->icsk_ca_ops = &tcp_init_congestion_ops; diff --git a/trunk/net/ipv4/tcp_minisocks.c b/trunk/net/ipv4/tcp_minisocks.c index 4be22280e6b3..a9d34e224cb6 100644 --- a/trunk/net/ipv4/tcp_minisocks.c +++ b/trunk/net/ipv4/tcp_minisocks.c @@ -476,7 +476,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, if (newtp->af_specific->md5_lookup(sk, newsk)) newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; #endif - if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) + if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; newtp->rx_opt.mss_clamp = req->mss; TCP_ECN_openreq_child(newtp, req); diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index 522bdc77206c..0ab39fedd2dc 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -3481,114 
+3481,91 @@ enum addr_type_t ANYCAST_ADDR, }; -/* called with rcu_read_lock() */ -static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, - struct netlink_callback *cb, enum addr_type_t type, - int s_ip_idx, int *p_ip_idx) -{ - struct inet6_ifaddr *ifa; - struct ifmcaddr6 *ifmca; - struct ifacaddr6 *ifaca; - int err = 1; - int ip_idx = *p_ip_idx; - - read_lock_bh(&idev->lock); - switch (type) { - case UNICAST_ADDR: - /* unicast address incl. temp addr */ - for (ifa = idev->addr_list; ifa; - ifa = ifa->if_next, ip_idx++) { - if (ip_idx < s_ip_idx) - continue; - err = inet6_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - RTM_NEWADDR, - NLM_F_MULTI); - if (err <= 0) - break; - } - break; - case MULTICAST_ADDR: - /* multicast address */ - for (ifmca = idev->mc_list; ifmca; - ifmca = ifmca->next, ip_idx++) { - if (ip_idx < s_ip_idx) - continue; - err = inet6_fill_ifmcaddr(skb, ifmca, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - RTM_GETMULTICAST, - NLM_F_MULTI); - if (err <= 0) - break; - } - break; - case ANYCAST_ADDR: - /* anycast address */ - for (ifaca = idev->ac_list; ifaca; - ifaca = ifaca->aca_next, ip_idx++) { - if (ip_idx < s_ip_idx) - continue; - err = inet6_fill_ifacaddr(skb, ifaca, - NETLINK_CB(cb->skb).pid, - cb->nlh->nlmsg_seq, - RTM_GETANYCAST, - NLM_F_MULTI); - if (err <= 0) - break; - } - break; - default: - break; - } - read_unlock_bh(&idev->lock); - *p_ip_idx = ip_idx; - return err; -} - static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, enum addr_type_t type) { - struct net *net = sock_net(skb->sk); - int h, s_h; int idx, ip_idx; int s_idx, s_ip_idx; + int err = 1; struct net_device *dev; - struct inet6_dev *idev; - struct hlist_head *head; - struct hlist_node *node; + struct inet6_dev *idev = NULL; + struct inet6_ifaddr *ifa; + struct ifmcaddr6 *ifmca; + struct ifacaddr6 *ifaca; + struct net *net = sock_net(skb->sk); - s_h = cb->args[0]; - s_idx = idx = cb->args[1]; - s_ip_idx = ip_idx = cb->args[2]; + s_idx = cb->args[0]; + s_ip_idx = ip_idx = cb->args[1]; - rcu_read_lock(); - for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { - idx = 0; - head = &net->dev_index_head[h]; - hlist_for_each_entry_rcu(dev, node, head, index_hlist) { - if (idx < s_idx) - goto cont; - if (idx > s_idx) - s_ip_idx = 0; - ip_idx = 0; - if ((idev = __in6_dev_get(dev)) == NULL) - goto cont; + idx = 0; + for_each_netdev(net, dev) { + if (idx < s_idx) + goto cont; + if (idx > s_idx) + s_ip_idx = 0; + ip_idx = 0; + if ((idev = in6_dev_get(dev)) == NULL) + goto cont; + read_lock_bh(&idev->lock); + switch (type) { + case UNICAST_ADDR: + /* unicast address incl. 
temp addr */ + for (ifa = idev->addr_list; ifa; + ifa = ifa->if_next, ip_idx++) { + if (ip_idx < s_ip_idx) + continue; + err = inet6_fill_ifaddr(skb, ifa, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_NEWADDR, + NLM_F_MULTI); + if (err <= 0) + break; + } + break; + case MULTICAST_ADDR: + /* multicast address */ + for (ifmca = idev->mc_list; ifmca; + ifmca = ifmca->next, ip_idx++) { + if (ip_idx < s_ip_idx) + continue; + err = inet6_fill_ifmcaddr(skb, ifmca, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_GETMULTICAST, + NLM_F_MULTI); + if (err <= 0) + break; + } + break; + case ANYCAST_ADDR: + /* anycast address */ + for (ifaca = idev->ac_list; ifaca; + ifaca = ifaca->aca_next, ip_idx++) { + if (ip_idx < s_ip_idx) + continue; + err = inet6_fill_ifacaddr(skb, ifaca, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_GETANYCAST, + NLM_F_MULTI); + if (err <= 0) + break; + } + break; + default: + break; + } + read_unlock_bh(&idev->lock); + in6_dev_put(idev); - if (in6_dump_addrs(idev, skb, cb, type, - s_ip_idx, &ip_idx) <= 0) - goto done; + if (err <= 0) + break; cont: - idx++; - } + idx++; } -done: - rcu_read_unlock(); - cb->args[0] = h; - cb->args[1] = idx; - cb->args[2] = ip_idx; - + cb->args[0] = idx; + cb->args[1] = ip_idx; return skb->len; } @@ -3853,7 +3830,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int h, s_h; - int idx = 0, s_idx; + int idx = 0, err, s_idx; struct net_device *dev; struct inet6_dev *idev; struct hlist_head *head; diff --git a/trunk/net/ipv6/anycast.c b/trunk/net/ipv6/anycast.c index f1c74c8ef9de..2f00ca83f049 100644 --- a/trunk/net/ipv6/anycast.c +++ b/trunk/net/ipv6/anycast.c @@ -431,9 +431,9 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq) struct net *net = seq_file_net(seq); state->idev = NULL; - for_each_netdev_rcu(net, state->dev) { + for_each_netdev(net, state->dev) { struct inet6_dev *idev; - idev = __in6_dev_get(state->dev); + idev = in6_dev_get(state->dev); if (!idev) continue; read_lock_bh(&idev->lock); @@ -443,6 +443,7 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq) break; } read_unlock_bh(&idev->lock); + in6_dev_put(idev); } return im; } @@ -453,15 +454,16 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im im = im->aca_next; while (!im) { - if (likely(state->idev != NULL)) + if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); - - state->dev = next_net_device_rcu(state->dev); + in6_dev_put(state->idev); + } + state->dev = next_net_device(state->dev); if (!state->dev) { state->idev = NULL; break; } - state->idev = __in6_dev_get(state->dev); + state->idev = in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); @@ -480,30 +482,29 @@ static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos) } static void *ac6_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU) + __acquires(dev_base_lock) { - rcu_read_lock(); + read_lock(&dev_base_lock); return ac6_get_idx(seq, *pos); } static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct ifacaddr6 *im = ac6_get_next(seq, v); - + struct ifacaddr6 *im; + im = ac6_get_next(seq, v); ++*pos; return im; } static void ac6_seq_stop(struct seq_file *seq, void *v) - __releases(RCU) + __releases(dev_base_lock) { struct ac6_iter_state *state = ac6_seq_private(seq); - if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); - state->idev = NULL; + 
in6_dev_put(state->idev); } - rcu_read_unlock(); + read_unlock(&dev_base_lock); } static int ac6_seq_show(struct seq_file *seq, void *v) diff --git a/trunk/net/ipv6/ip6_tunnel.c b/trunk/net/ipv6/ip6_tunnel.c index e5c0f6bb8314..1d614113a4ba 100644 --- a/trunk/net/ipv6/ip6_tunnel.c +++ b/trunk/net/ipv6/ip6_tunnel.c @@ -78,7 +78,7 @@ static void ip6_fb_tnl_dev_init(struct net_device *dev); static void ip6_tnl_dev_init(struct net_device *dev); static void ip6_tnl_dev_setup(struct net_device *dev); -static int ip6_tnl_net_id __read_mostly; +static int ip6_tnl_net_id; struct ip6_tnl_net { /* the IPv6 tunnel fallback device */ struct net_device *fb_tnl_dev; diff --git a/trunk/net/ipv6/mcast.c b/trunk/net/ipv6/mcast.c index 1f9c44442e65..f9fcf690bd5d 100644 --- a/trunk/net/ipv6/mcast.c +++ b/trunk/net/ipv6/mcast.c @@ -2375,9 +2375,9 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) struct net *net = seq_file_net(seq); state->idev = NULL; - for_each_netdev_rcu(net, state->dev) { + for_each_netdev(net, state->dev) { struct inet6_dev *idev; - idev = __in6_dev_get(state->dev); + idev = in6_dev_get(state->dev); if (!idev) continue; read_lock_bh(&idev->lock); @@ -2387,6 +2387,7 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) break; } read_unlock_bh(&idev->lock); + in6_dev_put(idev); } return im; } @@ -2397,15 +2398,16 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr im = im->next; while (!im) { - if (likely(state->idev != NULL)) + if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); - - state->dev = next_net_device_rcu(state->dev); + in6_dev_put(state->idev); + } + state->dev = next_net_device(state->dev); if (!state->dev) { state->idev = NULL; break; } - state->idev = __in6_dev_get(state->dev); + state->idev = in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); @@ -2424,31 +2426,31 @@ static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos) } static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU) + __acquires(dev_base_lock) { - rcu_read_lock(); + read_lock(&dev_base_lock); return igmp6_mc_get_idx(seq, *pos); } static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v); - + struct ifmcaddr6 *im; + im = igmp6_mc_get_next(seq, v); ++*pos; return im; } static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) - __releases(RCU) + __releases(dev_base_lock) { struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); - if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); + in6_dev_put(state->idev); state->idev = NULL; } state->dev = NULL; - rcu_read_unlock(); + read_unlock(&dev_base_lock); } static int igmp6_mc_seq_show(struct seq_file *seq, void *v) @@ -2505,9 +2507,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) state->idev = NULL; state->im = NULL; - for_each_netdev_rcu(net, state->dev) { + for_each_netdev(net, state->dev) { struct inet6_dev *idev; - idev = __in6_dev_get(state->dev); + idev = in6_dev_get(state->dev); if (unlikely(idev == NULL)) continue; read_lock_bh(&idev->lock); @@ -2523,6 +2525,7 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) spin_unlock_bh(&im->mca_lock); } read_unlock_bh(&idev->lock); + in6_dev_put(idev); } return psf; } @@ -2536,15 +2539,16 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s 
spin_unlock_bh(&state->im->mca_lock); state->im = state->im->next; while (!state->im) { - if (likely(state->idev != NULL)) + if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); - - state->dev = next_net_device_rcu(state->dev); + in6_dev_put(state->idev); + } + state->dev = next_net_device(state->dev); if (!state->dev) { state->idev = NULL; goto out; } - state->idev = __in6_dev_get(state->dev); + state->idev = in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); @@ -2569,9 +2573,9 @@ static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos) } static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU) + __acquires(dev_base_lock) { - rcu_read_lock(); + read_lock(&dev_base_lock); return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } @@ -2587,7 +2591,7 @@ static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) - __releases(RCU) + __releases(dev_base_lock) { struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); if (likely(state->im != NULL)) { @@ -2596,10 +2600,11 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) } if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); + in6_dev_put(state->idev); state->idev = NULL; } state->dev = NULL; - rcu_read_unlock(); + read_unlock(&dev_base_lock); } static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) diff --git a/trunk/net/ipv6/sit.c b/trunk/net/ipv6/sit.c index d9deaa7753ef..b6e145a673ab 100644 --- a/trunk/net/ipv6/sit.c +++ b/trunk/net/ipv6/sit.c @@ -66,7 +66,7 @@ static void ipip6_fb_tunnel_init(struct net_device *dev); static void ipip6_tunnel_init(struct net_device *dev); static void ipip6_tunnel_setup(struct net_device *dev); -static int sit_net_id __read_mostly; +static int sit_net_id; struct sit_net { struct ip_tunnel *tunnels_r_l[HASH_SIZE]; struct ip_tunnel *tunnels_r[HASH_SIZE]; diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c index de709091b26d..696a22f034e8 100644 --- a/trunk/net/ipv6/tcp_ipv6.c +++ b/trunk/net/ipv6/tcp_ipv6.c @@ -1851,7 +1851,7 @@ static int tcp_v6_init_sock(struct sock *sk) */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd_clamp = ~0; - tp->mss_cache = TCP_MSS_DEFAULT; + tp->mss_cache = 536; tp->reordering = sysctl_tcp_reordering; diff --git a/trunk/net/iucv/iucv.c b/trunk/net/iucv/iucv.c index 3b1f5f5f8de7..3973d0e61e56 100644 --- a/trunk/net/iucv/iucv.c +++ b/trunk/net/iucv/iucv.c @@ -1768,6 +1768,7 @@ static void iucv_tasklet_fn(unsigned long ignored) */ static void iucv_work_fn(struct work_struct *work) { + typedef void iucv_irq_fn(struct iucv_irq_data *); LIST_HEAD(work_queue); struct iucv_irq_list *p, *n; @@ -1877,25 +1878,14 @@ int iucv_path_table_empty(void) static int iucv_pm_freeze(struct device *dev) { int cpu; - struct iucv_irq_list *p, *n; int rc = 0; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "iucv_pm_freeze\n"); #endif - if (iucv_pm_state != IUCV_PM_FREEZING) { - for_each_cpu_mask_nr(cpu, iucv_irq_cpumask) - smp_call_function_single(cpu, iucv_block_cpu_almost, - NULL, 1); - cancel_work_sync(&iucv_work); - list_for_each_entry_safe(p, n, &iucv_work_queue, list) { - list_del_init(&p->list); - iucv_sever_pathid(p->data.ippathid, - iucv_error_no_listener); - kfree(p); - } - } iucv_pm_state = IUCV_PM_FREEZING; + for_each_cpu_mask_nr(cpu, iucv_irq_cpumask) + smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1); if (dev->driver && 
dev->driver->pm && dev->driver->pm->freeze) rc = dev->driver->pm->freeze(dev); if (iucv_path_table_empty()) diff --git a/trunk/net/key/af_key.c b/trunk/net/key/af_key.c index 478c8b32a5fb..86b2c22d0918 100644 --- a/trunk/net/key/af_key.c +++ b/trunk/net/key/af_key.c @@ -35,7 +35,7 @@ #define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x)) #define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x)) -static int pfkey_net_id __read_mostly; +static int pfkey_net_id; struct netns_pfkey { /* List of all pfkey sockets. */ struct hlist_head table; diff --git a/trunk/net/netfilter/nf_conntrack_proto_dccp.c b/trunk/net/netfilter/nf_conntrack_proto_dccp.c index 80abdf297b36..1b816a2ea813 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_dccp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_dccp.c @@ -384,7 +384,7 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = }; /* this module per-net specifics */ -static int dccp_net_id __read_mostly; +static int dccp_net_id; struct dccp_net { int dccp_loose; unsigned int dccp_timeout[CT_DCCP_MAX + 1]; diff --git a/trunk/net/netfilter/nf_conntrack_proto_gre.c b/trunk/net/netfilter/nf_conntrack_proto_gre.c index 91d0e719d67c..a54a0af0edba 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_gre.c +++ b/trunk/net/netfilter/nf_conntrack_proto_gre.c @@ -43,7 +43,7 @@ #define GRE_TIMEOUT (30 * HZ) #define GRE_STREAM_TIMEOUT (180 * HZ) -static int proto_gre_net_id __read_mostly; +static int proto_gre_net_id; struct netns_proto_gre { rwlock_t keymap_lock; struct list_head keymap_list; diff --git a/trunk/net/netlink/af_netlink.c b/trunk/net/netlink/af_netlink.c index eff5b0ddc5ca..f30d596dbc58 100644 --- a/trunk/net/netlink/af_netlink.c +++ b/trunk/net/netlink/af_netlink.c @@ -498,7 +498,7 @@ static int netlink_release(struct socket *sock) skb_queue_purge(&sk->sk_write_queue); - if (nlk->pid) { + if (nlk->pid && !nlk->subscriptions) { struct netlink_notify n = { .net = sock_net(sk), .protocol = sk->sk_protocol, diff --git a/trunk/net/phonet/af_phonet.c b/trunk/net/phonet/af_phonet.c index 526d0273991a..8d3a55b4a30c 100644 --- a/trunk/net/phonet/af_phonet.c +++ b/trunk/net/phonet/af_phonet.c @@ -35,6 +35,7 @@ /* Transport protocol registration */ static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; +static DEFINE_SPINLOCK(proto_tab_lock); static struct phonet_protocol *phonet_proto_get(int protocol) { @@ -43,11 +44,11 @@ static struct phonet_protocol *phonet_proto_get(int protocol) if (protocol >= PHONET_NPROTO) return NULL; - rcu_read_lock(); - pp = rcu_dereference(proto_tab[protocol]); + spin_lock(&proto_tab_lock); + pp = proto_tab[protocol]; if (pp && !try_module_get(pp->prot->owner)) pp = NULL; - rcu_read_unlock(); + spin_unlock(&proto_tab_lock); return pp; } @@ -438,8 +439,6 @@ static struct packet_type phonet_packet_type __read_mostly = { .func = phonet_rcv, }; -static DEFINE_MUTEX(proto_tab_lock); - int __init_or_module phonet_proto_register(int protocol, struct phonet_protocol *pp) { @@ -452,12 +451,12 @@ int __init_or_module phonet_proto_register(int protocol, if (err) return err; - mutex_lock(&proto_tab_lock); + spin_lock(&proto_tab_lock); if (proto_tab[protocol]) err = -EBUSY; else - rcu_assign_pointer(proto_tab[protocol], pp); - mutex_unlock(&proto_tab_lock); + proto_tab[protocol] = pp; + spin_unlock(&proto_tab_lock); return err; } @@ -465,11 +464,10 @@ EXPORT_SYMBOL(phonet_proto_register); void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) { - mutex_lock(&proto_tab_lock); + spin_lock(&proto_tab_lock); 
BUG_ON(proto_tab[protocol] != pp); - rcu_assign_pointer(proto_tab[protocol], NULL); - mutex_unlock(&proto_tab_lock); - synchronize_rcu(); + proto_tab[protocol] = NULL; + spin_unlock(&proto_tab_lock); proto_unregister(pp->prot); } EXPORT_SYMBOL(phonet_proto_unregister); diff --git a/trunk/net/phonet/pn_dev.c b/trunk/net/phonet/pn_dev.c index d87388c94b00..6d64fda1afc9 100644 --- a/trunk/net/phonet/pn_dev.c +++ b/trunk/net/phonet/pn_dev.c @@ -34,7 +34,7 @@ #include struct phonet_routes { - struct mutex lock; + spinlock_t lock; struct net_device *table[64]; }; @@ -43,7 +43,7 @@ struct phonet_net { struct phonet_routes routes; }; -int phonet_net_id __read_mostly; +int phonet_net_id; struct phonet_device_list *phonet_device_list(struct net *net) { @@ -61,8 +61,7 @@ static struct phonet_device *__phonet_device_alloc(struct net_device *dev) pnd->netdev = dev; bitmap_zero(pnd->addrs, 64); - BUG_ON(!mutex_is_locked(&pndevs->lock)); - list_add_rcu(&pnd->list, &pndevs->list); + list_add(&pnd->list, &pndevs->list); return pnd; } @@ -71,7 +70,6 @@ static struct phonet_device *__phonet_get(struct net_device *dev) struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd; - BUG_ON(!mutex_is_locked(&pndevs->lock)); list_for_each_entry(pnd, &pndevs->list, list) { if (pnd->netdev == dev) return pnd; @@ -79,18 +77,6 @@ static struct phonet_device *__phonet_get(struct net_device *dev) return NULL; } -static struct phonet_device *__phonet_get_rcu(struct net_device *dev) -{ - struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); - struct phonet_device *pnd; - - list_for_each_entry_rcu(pnd, &pndevs->list, list) { - if (pnd->netdev == dev) - return pnd; - } - return NULL; -} - static void phonet_device_destroy(struct net_device *dev) { struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); @@ -98,11 +84,11 @@ static void phonet_device_destroy(struct net_device *dev) ASSERT_RTNL(); - mutex_lock(&pndevs->lock); + spin_lock_bh(&pndevs->lock); pnd = __phonet_get(dev); if (pnd) - list_del_rcu(&pnd->list); - mutex_unlock(&pndevs->lock); + list_del(&pnd->list); + spin_unlock_bh(&pndevs->lock); if (pnd) { u8 addr; @@ -120,8 +106,8 @@ struct net_device *phonet_device_get(struct net *net) struct phonet_device *pnd; struct net_device *dev = NULL; - rcu_read_lock(); - list_for_each_entry_rcu(pnd, &pndevs->list, list) { + spin_lock_bh(&pndevs->lock); + list_for_each_entry(pnd, &pndevs->list, list) { dev = pnd->netdev; BUG_ON(!dev); @@ -132,7 +118,7 @@ struct net_device *phonet_device_get(struct net *net) } if (dev) dev_hold(dev); - rcu_read_unlock(); + spin_unlock_bh(&pndevs->lock); return dev; } @@ -142,7 +128,7 @@ int phonet_address_add(struct net_device *dev, u8 addr) struct phonet_device *pnd; int err = 0; - mutex_lock(&pndevs->lock); + spin_lock_bh(&pndevs->lock); /* Find or create Phonet-specific device data */ pnd = __phonet_get(dev); if (pnd == NULL) @@ -151,7 +137,7 @@ int phonet_address_add(struct net_device *dev, u8 addr) err = -ENOMEM; else if (test_and_set_bit(addr >> 2, pnd->addrs)) err = -EEXIST; - mutex_unlock(&pndevs->lock); + spin_unlock_bh(&pndevs->lock); return err; } @@ -161,32 +147,27 @@ int phonet_address_del(struct net_device *dev, u8 addr) struct phonet_device *pnd; int err = 0; - mutex_lock(&pndevs->lock); + spin_lock_bh(&pndevs->lock); pnd = __phonet_get(dev); - if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) { + if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) err = -EADDRNOTAVAIL; - pnd = NULL; - } else if 
(bitmap_empty(pnd->addrs, 64)) - list_del_rcu(&pnd->list); - else - pnd = NULL; - mutex_unlock(&pndevs->lock); - - if (pnd) { - synchronize_rcu(); + else if (bitmap_empty(pnd->addrs, 64)) { + list_del(&pnd->list); kfree(pnd); } + spin_unlock_bh(&pndevs->lock); return err; } /* Gets a source address toward a destination, through a interface. */ u8 phonet_address_get(struct net_device *dev, u8 daddr) { + struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); struct phonet_device *pnd; u8 saddr; - rcu_read_lock(); - pnd = __phonet_get_rcu(dev); + spin_lock_bh(&pndevs->lock); + pnd = __phonet_get(dev); if (pnd) { BUG_ON(bitmap_empty(pnd->addrs, 64)); @@ -197,7 +178,7 @@ u8 phonet_address_get(struct net_device *dev, u8 daddr) saddr = find_first_bit(pnd->addrs, 64) << 2; } else saddr = PN_NO_ADDR; - rcu_read_unlock(); + spin_unlock_bh(&pndevs->lock); if (saddr == PN_NO_ADDR) { /* Fallback to another device */ @@ -219,8 +200,8 @@ int phonet_address_lookup(struct net *net, u8 addr) struct phonet_device *pnd; int err = -EADDRNOTAVAIL; - rcu_read_lock(); - list_for_each_entry_rcu(pnd, &pndevs->list, list) { + spin_lock_bh(&pndevs->lock); + list_for_each_entry(pnd, &pndevs->list, list) { /* Don't allow unregistering devices! */ if ((pnd->netdev->reg_state != NETREG_REGISTERED) || ((pnd->netdev->flags & IFF_UP)) != IFF_UP) @@ -232,7 +213,7 @@ int phonet_address_lookup(struct net *net, u8 addr) } } found: - rcu_read_unlock(); + spin_unlock_bh(&pndevs->lock); return err; } @@ -267,22 +248,17 @@ static void phonet_route_autodel(struct net_device *dev) /* Remove left-over Phonet routes */ bitmap_zero(deleted, 64); - mutex_lock(&pnn->routes.lock); + spin_lock_bh(&pnn->routes.lock); for (i = 0; i < 64; i++) if (dev == pnn->routes.table[i]) { - rcu_assign_pointer(pnn->routes.table[i], NULL); set_bit(i, deleted); + pnn->routes.table[i] = NULL; + dev_put(dev); } - mutex_unlock(&pnn->routes.lock); - - if (bitmap_empty(deleted, 64)) - return; /* short-circuit RCU */ - synchronize_rcu(); + spin_unlock_bh(&pnn->routes.lock); for (i = find_first_bit(deleted, 64); i < 64; - i = find_next_bit(deleted, 64, i + 1)) { + i = find_next_bit(deleted, 64, i + 1)) rtm_phonet_notify(RTM_DELROUTE, dev, i); - dev_put(dev); - } } /* notify Phonet of device events */ @@ -323,8 +299,8 @@ static int phonet_init_net(struct net *net) } INIT_LIST_HEAD(&pnn->pndevs.list); - mutex_init(&pnn->pndevs.lock); - mutex_init(&pnn->routes.lock); + spin_lock_init(&pnn->pndevs.lock); + spin_lock_init(&pnn->routes.lock); net_assign_generic(net, phonet_net_id, pnn); return 0; } @@ -385,13 +361,13 @@ int phonet_route_add(struct net_device *dev, u8 daddr) int err = -EEXIST; daddr = daddr >> 2; - mutex_lock(&routes->lock); + spin_lock_bh(&routes->lock); if (routes->table[daddr] == NULL) { - rcu_assign_pointer(routes->table[daddr], dev); + routes->table[daddr] = dev; dev_hold(dev); err = 0; } - mutex_unlock(&routes->lock); + spin_unlock_bh(&routes->lock); return err; } @@ -399,20 +375,17 @@ int phonet_route_del(struct net_device *dev, u8 daddr) { struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); struct phonet_routes *routes = &pnn->routes; + int err = -ENOENT; daddr = daddr >> 2; - mutex_lock(&routes->lock); - if (dev == routes->table[daddr]) - rcu_assign_pointer(routes->table[daddr], NULL); - else - dev = NULL; - mutex_unlock(&routes->lock); - - if (!dev) - return -ENOENT; - synchronize_rcu(); - dev_put(dev); - return 0; + spin_lock_bh(&routes->lock); + if (dev == routes->table[daddr]) { + routes->table[daddr] = NULL; + 
dev_put(dev); + err = 0; + } + spin_unlock_bh(&routes->lock); + return err; } struct net_device *phonet_route_get(struct net *net, u8 daddr) @@ -424,9 +397,9 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr) ASSERT_RTNL(); /* no need to hold the device */ daddr >>= 2; - rcu_read_lock(); - dev = rcu_dereference(routes->table[daddr]); - rcu_read_unlock(); + spin_lock_bh(&routes->lock); + dev = routes->table[daddr]; + spin_unlock_bh(&routes->lock); return dev; } @@ -436,12 +409,11 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr) struct phonet_routes *routes = &pnn->routes; struct net_device *dev; - daddr >>= 2; - rcu_read_lock(); - dev = rcu_dereference(routes->table[daddr]); + spin_lock_bh(&routes->lock); + dev = routes->table[daddr >> 2]; if (dev) dev_hold(dev); - rcu_read_unlock(); + spin_unlock_bh(&routes->lock); if (!dev) dev = phonet_device_get(net); /* Default route */ diff --git a/trunk/net/phonet/pn_netlink.c b/trunk/net/phonet/pn_netlink.c index 2e6c7eb8e76a..609e509b369b 100644 --- a/trunk/net/phonet/pn_netlink.c +++ b/trunk/net/phonet/pn_netlink.c @@ -131,8 +131,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) int addr_idx = 0, addr_start_idx = cb->args[1]; pndevs = phonet_device_list(sock_net(skb->sk)); - rcu_read_lock(); - list_for_each_entry_rcu(pnd, &pndevs->list, list) { + spin_lock_bh(&pndevs->lock); + list_for_each_entry(pnd, &pndevs->list, list) { u8 addr; if (dev_idx > dev_start_idx) @@ -154,7 +154,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) } out: - rcu_read_unlock(); + spin_unlock_bh(&pndevs->lock); cb->args[0] = dev_idx; cb->args[1] = addr_idx; diff --git a/trunk/net/sched/act_mirred.c b/trunk/net/sched/act_mirred.c index 797479369881..b9aaab4e0354 100644 --- a/trunk/net/sched/act_mirred.c +++ b/trunk/net/sched/act_mirred.c @@ -65,53 +65,48 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est, struct tc_mirred *parm; struct tcf_mirred *m; struct tcf_common *pc; - struct net_device *dev; - int ret, ok_push = 0; + struct net_device *dev = NULL; + int ret = 0, err; + int ok_push = 0; if (nla == NULL) return -EINVAL; - ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); - if (ret < 0) - return ret; + + err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); + if (err < 0) + return err; + if (tb[TCA_MIRRED_PARMS] == NULL) return -EINVAL; parm = nla_data(tb[TCA_MIRRED_PARMS]); - switch (parm->eaction) { - case TCA_EGRESS_MIRROR: - case TCA_EGRESS_REDIR: - break; - default: - return -EINVAL; - } + if (parm->ifindex) { dev = __dev_get_by_index(&init_net, parm->ifindex); if (dev == NULL) return -ENODEV; switch (dev->type) { - case ARPHRD_TUNNEL: - case ARPHRD_TUNNEL6: - case ARPHRD_SIT: - case ARPHRD_IPGRE: - case ARPHRD_VOID: - case ARPHRD_NONE: - ok_push = 0; - break; - default: - ok_push = 1; - break; + case ARPHRD_TUNNEL: + case ARPHRD_TUNNEL6: + case ARPHRD_SIT: + case ARPHRD_IPGRE: + case ARPHRD_VOID: + case ARPHRD_NONE: + ok_push = 0; + break; + default: + ok_push = 1; + break; } - } else { - dev = NULL; } pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info); if (!pc) { - if (dev == NULL) + if (!parm->ifindex) return -EINVAL; pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind, &mirred_idx_gen, &mirred_hash_info); if (IS_ERR(pc)) - return PTR_ERR(pc); + return PTR_ERR(pc); ret = ACT_P_CREATED; } else { if (!ovr) { @@ -124,12 +119,12 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est, 
spin_lock_bh(&m->tcf_lock); m->tcf_action = parm->action; m->tcfm_eaction = parm->eaction; - if (dev != NULL) { + if (parm->ifindex) { m->tcfm_ifindex = parm->ifindex; if (ret != ACT_P_CREATED) dev_put(m->tcfm_dev); - dev_hold(dev); m->tcfm_dev = dev; + dev_hold(dev); m->tcfm_ok_push = ok_push; } spin_unlock_bh(&m->tcf_lock); @@ -153,32 +148,47 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, { struct tcf_mirred *m = a->priv; struct net_device *dev; - struct sk_buff *skb2; - u32 at; - int retval, err = 1; + struct sk_buff *skb2 = NULL; + u32 at = G_TC_AT(skb->tc_verd); spin_lock(&m->tcf_lock); - m->tcf_tm.lastuse = jiffies; dev = m->tcfm_dev; - if (!(dev->flags & IFF_UP)) { + m->tcf_tm.lastuse = jiffies; + + if (!(dev->flags&IFF_UP) ) { if (net_ratelimit()) printk("mirred to Houston: device %s is gone!\n", dev->name); - goto out; +bad_mirred: + if (skb2 != NULL) + kfree_skb(skb2); + m->tcf_qstats.overlimits++; + m->tcf_bstats.bytes += qdisc_pkt_len(skb); + m->tcf_bstats.packets++; + spin_unlock(&m->tcf_lock); + /* should we be asking for packet to be dropped? + * may make sense for redirect case only + */ + return TC_ACT_SHOT; } skb2 = skb_act_clone(skb, GFP_ATOMIC); if (skb2 == NULL) - goto out; + goto bad_mirred; + if (m->tcfm_eaction != TCA_EGRESS_MIRROR && + m->tcfm_eaction != TCA_EGRESS_REDIR) { + if (net_ratelimit()) + printk("tcf_mirred unknown action %d\n", + m->tcfm_eaction); + goto bad_mirred; + } m->tcf_bstats.bytes += qdisc_pkt_len(skb2); m->tcf_bstats.packets++; - at = G_TC_AT(skb->tc_verd); - if (!(at & AT_EGRESS)) { + if (!(at & AT_EGRESS)) if (m->tcfm_ok_push) skb_push(skb2, skb2->dev->hard_header_len); - } /* mirror is always swallowed */ if (m->tcfm_eaction != TCA_EGRESS_MIRROR) @@ -187,23 +197,8 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, skb2->dev = dev; skb2->iif = skb->dev->ifindex; dev_queue_xmit(skb2); - err = 0; - -out: - if (err) { - m->tcf_qstats.overlimits++; - m->tcf_bstats.bytes += qdisc_pkt_len(skb); - m->tcf_bstats.packets++; - /* should we be asking for packet to be dropped? 
- * may make sense for redirect case only - */ - retval = TC_ACT_SHOT; - } else { - retval = m->tcf_action; - } spin_unlock(&m->tcf_lock); - - return retval; + return m->tcf_action; } static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c index 5173c1e1b19c..4ae6aa562f2b 100644 --- a/trunk/net/sched/sch_generic.c +++ b/trunk/net/sched/sch_generic.c @@ -119,26 +119,32 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, spin_unlock(root_lock); HARD_TX_LOCK(dev, txq, smp_processor_id()); - if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) + if (!netif_tx_queue_stopped(txq) && + !netif_tx_queue_frozen(txq)) ret = dev_hard_start_xmit(skb, dev, txq); - HARD_TX_UNLOCK(dev, txq); spin_lock(root_lock); - if (dev_xmit_complete(ret)) { - /* Driver sent out skb successfully or skb was consumed */ + switch (ret) { + case NETDEV_TX_OK: + /* Driver sent out skb successfully */ ret = qdisc_qlen(q); - } else if (ret == NETDEV_TX_LOCKED) { + break; + + case NETDEV_TX_LOCKED: /* Driver try lock failed */ ret = handle_dev_cpu_collision(skb, txq, q); - } else { + break; + + default: /* Driver returned NETDEV_TX_BUSY - requeue skb */ if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) printk(KERN_WARNING "BUG %s code %d qlen %d\n", dev->name, ret, q->q.qlen); ret = dev_requeue_skb(skb, q); + break; } if (ret && (netif_tx_queue_stopped(txq) || diff --git a/trunk/net/sctp/associola.c b/trunk/net/sctp/associola.c index 7eed77a39d0d..8450960df24f 100644 --- a/trunk/net/sctp/associola.c +++ b/trunk/net/sctp/associola.c @@ -1485,13 +1485,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) * local endpoint and the remote peer. */ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, - sctp_scope_t scope, gfp_t gfp) + gfp_t gfp) { + sctp_scope_t scope; int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ + scope = sctp_scope(&asoc->peer.active_path->ipaddr); flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; diff --git a/trunk/net/sctp/sm_statefuns.c b/trunk/net/sctp/sm_statefuns.c index d4df45022ffa..c8fae1983dd1 100644 --- a/trunk/net/sctp/sm_statefuns.c +++ b/trunk/net/sctp/sm_statefuns.c @@ -384,11 +384,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, if (!new_asoc) goto nomem; - if (sctp_assoc_set_bind_addr_from_ep(new_asoc, - sctp_scope(sctp_source(chunk)), - GFP_ATOMIC) < 0) - goto nomem_init; - /* The call, sctp_process_init(), can fail on memory allocation. */ if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, sctp_source(chunk), @@ -406,6 +401,9 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, len = ntohs(err_chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); + if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0) + goto nomem_init; + repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); if (!repl) goto nomem_init; @@ -1454,10 +1452,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( if (!new_asoc) goto nomem; - if (sctp_assoc_set_bind_addr_from_ep(new_asoc, - sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0) - goto nomem; - /* In the outbound INIT ACK the endpoint MUST copy its current * Verification Tag and Peers Verification tag into a reserved * place (local tie-tag and per tie-tag) within the state cookie. 
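(Note: illustrative sketch only, not part of the diff.  The associola.c hunk
above changes sctp_assoc_set_bind_addr_from_ep() to take just (asoc, gfp) and
to derive the address scope internally from asoc->peer.active_path, so the
callers in sm_statefuns.c and socket.c drop the scope argument and make the
call only after the peer transport has been set up, as in the hunks around
this point.  The two calling conventions, using identifiers from those hunks:)

	/* old convention (removed above): the caller picked the scope from
	 * the source of the incoming chunk, before any peer was added.
	 */
	if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
					     sctp_scope(sctp_source(chunk)),
					     GFP_ATOMIC) < 0)
		goto nomem;

	/* new convention (added below): the helper reads the scope from the
	 * peer's active path itself, so the call is made only after
	 * sctp_process_init() has populated asoc->peer.active_path.
	 */
	if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
		goto nomem;
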
@@ -1494,6 +1488,9 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( sizeof(sctp_chunkhdr_t); } + if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0) + goto nomem; + repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); if (!repl) goto nomem; diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index 66b1f02b17ba..4085db99033d 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -1080,13 +1080,6 @@ static int __sctp_connect(struct sock* sk, err = -ENOMEM; goto out_free; } - - err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, - GFP_KERNEL); - if (err < 0) { - goto out_free; - } - } /* Prime the peer's transport structures. */ @@ -1102,6 +1095,11 @@ static int __sctp_connect(struct sock* sk, walk_size += af->sockaddr_len; } + err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL); + if (err < 0) { + goto out_free; + } + /* In case the user of sctp_connectx() wants an association * id back, assign one now. */ @@ -1276,30 +1274,22 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, } /* - * New (hopefully final) interface for the API. - * We use the sctp_getaddrs_old structure so that use-space library - * can avoid any unnecessary allocations. The only defferent part - * is that we store the actual length of the address buffer into the - * addrs_num structure member. That way we can re-use the existing - * code. + * New (hopefully final) interface for the API. The option buffer is used + * both for the returned association id and the addresses. */ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, char __user *optval, int __user *optlen) { - struct sctp_getaddrs_old param; sctp_assoc_t assoc_id = 0; int err = 0; - if (len < sizeof(param)) + if (len < sizeof(assoc_id)) return -EINVAL; - if (copy_from_user(¶m, optval, sizeof(param))) - return -EFAULT; - err = __sctp_setsockopt_connectx(sk, - (struct sockaddr __user *)param.addrs, - param.addr_num, &assoc_id); + (struct sockaddr __user *)(optval + sizeof(assoc_id)), + len - sizeof(assoc_id), &assoc_id); if (err == 0 || err == -EINPROGRESS) { if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) @@ -1699,11 +1689,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; } asoc = new_asoc; - err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); - if (err < 0) { - err = -ENOMEM; - goto out_free; - } /* If the SCTP_INIT ancillary data is specified, set all * the association init values accordingly. @@ -1733,6 +1718,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, err = -ENOMEM; goto out_free; } + err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL); + if (err < 0) { + err = -ENOMEM; + goto out_free; + } } /* ASSERT: we have a valid association at this point. */ diff --git a/trunk/net/sctp/transport.c b/trunk/net/sctp/transport.c index 3b141bb32faf..c256e4839316 100644 --- a/trunk/net/sctp/transport.c +++ b/trunk/net/sctp/transport.c @@ -308,8 +308,7 @@ void sctp_transport_route(struct sctp_transport *transport, /* Initialize sk->sk_rcv_saddr, if the transport is the * association's active path for getsockname(). 
*/ - if (asoc && (!asoc->peer.primary_path || - (transport == asoc->peer.active_path))) + if (asoc && (transport == asoc->peer.active_path)) opt->pf->af->to_sk_saddr(&transport->saddr, asoc->base.sk); } else diff --git a/trunk/net/socket.c b/trunk/net/socket.c index 402abb39cbfe..befd9f5b1620 100644 --- a/trunk/net/socket.c +++ b/trunk/net/socket.c @@ -100,6 +100,14 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -2715,15 +2723,38 @@ static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { - struct ifreq __user *uifr; + struct ifreq ifr; + struct compat_ifmap __user *uifmap32; + mm_segment_t old_fs; int err; - uifr = compat_alloc_user_space(sizeof(*uifr)); - if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) - return -EFAULT; - - err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); - + uifmap32 = &uifr32->ifr_ifru.ifru_map; + switch (cmd) { + case SIOCSIFMAP: + err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); + err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); + err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); + err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); + err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq); + err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); + err |= __get_user(ifr.ifr_map.port, &uifmap32->port); + if (err) + return -EFAULT; + break; + case SIOCSHWTSTAMP: + if (copy_from_user(&ifr, uifr32, sizeof(*uifr32))) + return -EFAULT; + ifr.ifr_data = compat_ptr(uifr32->ifr_ifru.ifru_data); + break; + default: + if (copy_from_user(&ifr, uifr32, sizeof(*uifr32))) + return -EFAULT; + break; + } + old_fs = get_fs(); + set_fs (KERNEL_DS); + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ifr); + set_fs (old_fs); if (!err) { switch (cmd) { case SIOCGIFFLAGS: @@ -2740,7 +2771,18 @@ static int dev_ifsioc(struct net *net, struct socket *sock, case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: - if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) + if (copy_to_user(uifr32, &ifr, sizeof(*uifr32))) + return -EFAULT; + break; + case SIOCGIFMAP: + err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); + err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); + err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); + err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); + err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); + err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); + err |= __put_user(ifr.ifr_map.port, &uifmap32->port); + if (err) err = -EFAULT; break; } @@ -2748,65 +2790,6 @@ static int dev_ifsioc(struct net *net, struct socket *sock, return err; } -static int compat_sioc_ifmap(struct net *net, unsigned int cmd, - struct compat_ifreq __user *uifr32) -{ - struct ifreq ifr; - struct compat_ifmap __user *uifmap32; - mm_segment_t old_fs; - int err; - - uifmap32 = &uifr32->ifr_ifru.ifru_map; - err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); - err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); - err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); - err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); - err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq); - err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); - err |= __get_user(ifr.ifr_map.port, &uifmap32->port); - if (err) - return -EFAULT; - - old_fs = get_fs(); - set_fs (KERNEL_DS); - err = dev_ioctl(net, 
cmd, (void __user *)&ifr); - set_fs (old_fs); - - if (cmd == SIOCGIFMAP && !err) { - err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); - err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); - err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); - err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); - err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); - err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); - err |= __put_user(ifr.ifr_map.port, &uifmap32->port); - if (err) - err = -EFAULT; - } - return err; -} - -static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) -{ - void __user *uptr; - compat_uptr_t uptr32; - struct ifreq __user *uifr; - - uifr = compat_alloc_user_space(sizeof (*uifr)); - if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) - return -EFAULT; - - if (get_user(uptr32, &uifr32->ifr_data)) - return -EFAULT; - - uptr = compat_ptr(uptr32); - - if (put_user(uptr, &uifr->ifr_data)) - return -EFAULT; - - return dev_ioctl(net, SIOCSHWTSTAMP, uifr); -} - struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ @@ -2909,6 +2892,173 @@ static int old_bridge_ioctl(compat_ulong_t __user *argp) return -EINVAL; } +struct atmif_sioc32 { + compat_int_t number; + compat_int_t length; + compat_caddr_t arg; +}; + +struct atm_iobuf32 { + compat_int_t length; + compat_caddr_t buffer; +}; + +#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct atmif_sioc32) +#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct atm_iobuf32) +#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct atmif_sioc32) +#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct atmif_sioc32) +#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct atmif_sioc32) +#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct atmif_sioc32) +#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct atmif_sioc32) +#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct atmif_sioc32) +#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct atmif_sioc32) +#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct atmif_sioc32) +#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct atmif_sioc32) +#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct atmif_sioc32) +#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct atmif_sioc32) +#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct atmif_sioc32) +#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct atmif_sioc32) +#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct atmif_sioc32) +#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct atmif_sioc32) + +static struct { + unsigned int cmd32; + unsigned int cmd; +} atm_ioctl_map[] = { + { ATM_GETLINKRATE32, ATM_GETLINKRATE }, + { ATM_GETNAMES32, ATM_GETNAMES }, + { ATM_GETTYPE32, ATM_GETTYPE }, + { ATM_GETESI32, ATM_GETESI }, + { ATM_GETADDR32, ATM_GETADDR }, + { ATM_RSTADDR32, ATM_RSTADDR }, + { ATM_ADDADDR32, ATM_ADDADDR }, + { ATM_DELADDR32, ATM_DELADDR }, + { ATM_GETCIRANGE32, ATM_GETCIRANGE }, + { ATM_SETCIRANGE32, ATM_SETCIRANGE }, + { ATM_SETESI32, ATM_SETESI }, + { ATM_SETESIF32, ATM_SETESIF }, + { ATM_GETSTAT32, ATM_GETSTAT }, + { ATM_GETSTATZ32, ATM_GETSTATZ }, + { ATM_GETLOOP32, ATM_GETLOOP }, + { ATM_SETLOOP32, ATM_SETLOOP }, + { ATM_QUERYLOOP32, ATM_QUERYLOOP } +}; + +#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map) + +static int do_atm_iobuf(struct net *net, struct socket *sock, + unsigned int cmd, unsigned long arg) +{ + struct atm_iobuf __user *iobuf; + struct atm_iobuf32 __user *iobuf32; + u32 data; + void __user *datap; + int len, err; 
+ + iobuf = compat_alloc_user_space(sizeof(*iobuf)); + iobuf32 = compat_ptr(arg); + + if (get_user(len, &iobuf32->length) || + get_user(data, &iobuf32->buffer)) + return -EFAULT; + datap = compat_ptr(data); + if (put_user(len, &iobuf->length) || + put_user(datap, &iobuf->buffer)) + return -EFAULT; + + err = sock_do_ioctl(net, sock, cmd, (unsigned long)iobuf); + + if (!err) { + if (copy_in_user(&iobuf32->length, &iobuf->length, + sizeof(int))) + err = -EFAULT; + } + + return err; +} + +static int do_atmif_sioc(struct net *net, struct socket *sock, + unsigned int cmd, unsigned long arg) +{ + struct atmif_sioc __user *sioc; + struct atmif_sioc32 __user *sioc32; + u32 data; + void __user *datap; + int err; + + sioc = compat_alloc_user_space(sizeof(*sioc)); + sioc32 = compat_ptr(arg); + + if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) || + get_user(data, &sioc32->arg)) + return -EFAULT; + datap = compat_ptr(data); + if (put_user(datap, &sioc->arg)) + return -EFAULT; + + err = sock_do_ioctl(net, sock, cmd, (unsigned long) sioc); + + if (!err) { + if (copy_in_user(&sioc32->length, &sioc->length, + sizeof(int))) + err = -EFAULT; + } + return err; +} + +static int do_atm_ioctl(struct net *net, struct socket *sock, + unsigned int cmd32, unsigned long arg) +{ + int i; + unsigned int cmd = 0; + + switch (cmd32) { + case SONET_GETSTAT: + case SONET_GETSTATZ: + case SONET_GETDIAG: + case SONET_SETDIAG: + case SONET_CLRDIAG: + case SONET_SETFRAMING: + case SONET_GETFRAMING: + case SONET_GETFRSENSE: + return do_atmif_sioc(net, sock, cmd32, arg); + } + + for (i = 0; i < NR_ATM_IOCTL; i++) { + if (cmd32 == atm_ioctl_map[i].cmd32) { + cmd = atm_ioctl_map[i].cmd; + break; + } + } + if (i == NR_ATM_IOCTL) + return -EINVAL; + + switch (cmd) { + case ATM_GETNAMES: + return do_atm_iobuf(net, sock, cmd, arg); + + case ATM_GETLINKRATE: + case ATM_GETTYPE: + case ATM_GETESI: + case ATM_GETADDR: + case ATM_RSTADDR: + case ATM_ADDADDR: + case ATM_DELADDR: + case ATM_GETCIRANGE: + case ATM_SETCIRANGE: + case ATM_SETESI: + case ATM_SETESIF: + case ATM_GETSTAT: + case ATM_GETSTATZ: + case ATM_GETLOOP: + case ATM_SETLOOP: + case ATM_QUERYLOOP: + return do_atmif_sioc(net, sock, cmd, arg); + } + + return -EINVAL; +} + static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { @@ -2931,9 +3081,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); - case SIOCGIFMAP: - case SIOCSIFMAP: - return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: @@ -2948,8 +3095,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); - case SIOCSHWTSTAMP: - return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: @@ -2976,9 +3121,12 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: + case SIOCGIFMAP: + case SIOCSIFMAP: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: + case SIOCSHWTSTAMP: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: @@ -2998,6 +3146,49 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); + case ATM_GETLINKRATE32: + case ATM_GETNAMES32: + case 
ATM_GETTYPE32: + case ATM_GETESI32: + case ATM_GETADDR32: + case ATM_RSTADDR32: + case ATM_ADDADDR32: + case ATM_DELADDR32: + case ATM_GETCIRANGE32: + case ATM_SETCIRANGE32: + case ATM_SETESI32: + case ATM_SETESIF32: + case ATM_GETSTAT32: + case ATM_GETSTATZ32: + case ATM_GETLOOP32: + case ATM_SETLOOP32: + case ATM_QUERYLOOP32: + case SONET_GETSTAT: + case SONET_GETSTATZ: + case SONET_GETDIAG: + case SONET_SETDIAG: + case SONET_CLRDIAG: + case SONET_SETFRAMING: + case SONET_GETFRAMING: + case SONET_GETFRSENSE: + return do_atm_ioctl(net, sock, cmd, arg); + + case ATMSIGD_CTRL: + case ATMARPD_CTRL: + case ATMLEC_CTRL: + case ATMLEC_MCAST: + case ATMLEC_DATA: + case ATM_SETSC: + case SIOCSIFATMTCP: + case SIOCMKCLIP: + case ATMARP_MKIP: + case ATMARP_SETENTRY: + case ATMARP_ENCAP: + case ATMTCP_CREATE: + case ATMTCP_REMOVE: + case ATMMPC_CTRL: + case ATMMPC_DATA: + case SIOCSARP: case SIOCGARP: case SIOCDARP: